transfer support kafka (#227)

* Modified:  etc/transfer.yml
	Modified:  go.mod
	Modified:  go.sum
	Modified:  src/modules/transfer/backend/init.go
	New file:  src/modules/transfer/backend/kafka.go
	Modified:  src/modules/transfer/backend/sender.go
	Modified:  src/modules/transfer/http/routes/push_router.go
	Modified:  src/modules/transfer/rpc/push.go
	New file:  vendor/github.com/Shopify/sarama/.gitignore
	New file:  vendor/github.com/Shopify/sarama/.golangci.yml
	New file:  vendor/github.com/Shopify/sarama/CHANGELOG.md
	New file:  vendor/github.com/Shopify/sarama/LICENSE
	New file:  vendor/github.com/Shopify/sarama/Makefile
	New file:  vendor/github.com/Shopify/sarama/README.md
	New file:  vendor/github.com/Shopify/sarama/Vagrantfile
	New file:  vendor/github.com/Shopify/sarama/acl_bindings.go
	New file:  vendor/github.com/Shopify/sarama/acl_create_request.go
	New file:  vendor/github.com/Shopify/sarama/acl_create_response.go
	New file:  vendor/github.com/Shopify/sarama/acl_delete_request.go
	New file:  vendor/github.com/Shopify/sarama/acl_delete_response.go
	New file:  vendor/github.com/Shopify/sarama/acl_describe_request.go
	New file:  vendor/github.com/Shopify/sarama/acl_describe_response.go
	New file:  vendor/github.com/Shopify/sarama/acl_filter.go
	New file:  vendor/github.com/Shopify/sarama/acl_types.go
	New file:  vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
	New file:  vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
	New file:  vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
	New file:  vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
	New file:  vendor/github.com/Shopify/sarama/admin.go
	New file:  vendor/github.com/Shopify/sarama/alter_configs_request.go
	New file:  vendor/github.com/Shopify/sarama/alter_configs_response.go
	New file:  vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go
	New file:  vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go
	New file:  vendor/github.com/Shopify/sarama/api_versions_request.go
	New file:  vendor/github.com/Shopify/sarama/api_versions_response.go
	New file:  vendor/github.com/Shopify/sarama/async_producer.go
	New file:  vendor/github.com/Shopify/sarama/balance_strategy.go
	New file:  vendor/github.com/Shopify/sarama/broker.go
	New file:  vendor/github.com/Shopify/sarama/client.go
	New file:  vendor/github.com/Shopify/sarama/compress.go
	New file:  vendor/github.com/Shopify/sarama/config.go
	New file:  vendor/github.com/Shopify/sarama/config_resource_type.go
	New file:  vendor/github.com/Shopify/sarama/consumer.go
	New file:  vendor/github.com/Shopify/sarama/consumer_group.go
	New file:  vendor/github.com/Shopify/sarama/consumer_group_members.go
	New file:  vendor/github.com/Shopify/sarama/consumer_metadata_request.go
	New file:  vendor/github.com/Shopify/sarama/consumer_metadata_response.go
	New file:  vendor/github.com/Shopify/sarama/control_record.go
	New file:  vendor/github.com/Shopify/sarama/crc32_field.go
	New file:  vendor/github.com/Shopify/sarama/create_partitions_request.go
	New file:  vendor/github.com/Shopify/sarama/create_partitions_response.go
	New file:  vendor/github.com/Shopify/sarama/create_topics_request.go
	New file:  vendor/github.com/Shopify/sarama/create_topics_response.go
	New file:  vendor/github.com/Shopify/sarama/decompress.go
	New file:  vendor/github.com/Shopify/sarama/delete_groups_request.go
	New file:  vendor/github.com/Shopify/sarama/delete_groups_response.go
	New file:  vendor/github.com/Shopify/sarama/delete_records_request.go
	New file:  vendor/github.com/Shopify/sarama/delete_records_response.go
	New file:  vendor/github.com/Shopify/sarama/delete_topics_request.go
	New file:  vendor/github.com/Shopify/sarama/delete_topics_response.go
	New file:  vendor/github.com/Shopify/sarama/describe_configs_request.go
	New file:  vendor/github.com/Shopify/sarama/describe_configs_response.go
	New file:  vendor/github.com/Shopify/sarama/describe_groups_request.go
	New file:  vendor/github.com/Shopify/sarama/describe_groups_response.go
	New file:  vendor/github.com/Shopify/sarama/describe_log_dirs_request.go
	New file:  vendor/github.com/Shopify/sarama/describe_log_dirs_response.go
	New file:  vendor/github.com/Shopify/sarama/dev.yml
	New file:  vendor/github.com/Shopify/sarama/encoder_decoder.go
	New file:  vendor/github.com/Shopify/sarama/end_txn_request.go
	New file:  vendor/github.com/Shopify/sarama/end_txn_response.go
	New file:  vendor/github.com/Shopify/sarama/errors.go
	New file:  vendor/github.com/Shopify/sarama/fetch_request.go
	New file:  vendor/github.com/Shopify/sarama/fetch_response.go
	New file:  vendor/github.com/Shopify/sarama/find_coordinator_request.go
	New file:  vendor/github.com/Shopify/sarama/find_coordinator_response.go
	New file:  vendor/github.com/Shopify/sarama/go.mod
	New file:  vendor/github.com/Shopify/sarama/go.sum
	New file:  vendor/github.com/Shopify/sarama/gssapi_kerberos.go
	New file:  vendor/github.com/Shopify/sarama/heartbeat_request.go
	New file:  vendor/github.com/Shopify/sarama/heartbeat_response.go
	New file:  vendor/github.com/Shopify/sarama/init_producer_id_request.go
	New file:  vendor/github.com/Shopify/sarama/init_producer_id_response.go
	New file:  vendor/github.com/Shopify/sarama/join_group_request.go
	New file:  vendor/github.com/Shopify/sarama/join_group_response.go
	New file:  vendor/github.com/Shopify/sarama/kerberos_client.go
	New file:  vendor/github.com/Shopify/sarama/leave_group_request.go
	New file:  vendor/github.com/Shopify/sarama/leave_group_response.go
	New file:  vendor/github.com/Shopify/sarama/length_field.go
	New file:  vendor/github.com/Shopify/sarama/list_groups_request.go
	New file:  vendor/github.com/Shopify/sarama/list_groups_response.go
	New file:  vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go
	New file:  vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go
	New file:  vendor/github.com/Shopify/sarama/message.go
	New file:  vendor/github.com/Shopify/sarama/message_set.go
	New file:  vendor/github.com/Shopify/sarama/metadata_request.go
	New file:  vendor/github.com/Shopify/sarama/metadata_response.go
	New file:  vendor/github.com/Shopify/sarama/metrics.go
	New file:  vendor/github.com/Shopify/sarama/mockbroker.go
	New file:  vendor/github.com/Shopify/sarama/mockkerberos.go
	New file:  vendor/github.com/Shopify/sarama/mockresponses.go
	New file:  vendor/github.com/Shopify/sarama/offset_commit_request.go
	New file:  vendor/github.com/Shopify/sarama/offset_commit_response.go
	New file:  vendor/github.com/Shopify/sarama/offset_fetch_request.go
	New file:  vendor/github.com/Shopify/sarama/offset_fetch_response.go
	New file:  vendor/github.com/Shopify/sarama/offset_manager.go
	New file:  vendor/github.com/Shopify/sarama/offset_request.go
	New file:  vendor/github.com/Shopify/sarama/offset_response.go
	New file:  vendor/github.com/Shopify/sarama/packet_decoder.go
	New file:  vendor/github.com/Shopify/sarama/packet_encoder.go
	New file:  vendor/github.com/Shopify/sarama/partitioner.go
	New file:  vendor/github.com/Shopify/sarama/prep_encoder.go
	New file:  vendor/github.com/Shopify/sarama/produce_request.go
	New file:  vendor/github.com/Shopify/sarama/produce_response.go
	New file:  vendor/github.com/Shopify/sarama/produce_set.go
	New file:  vendor/github.com/Shopify/sarama/real_decoder.go
	New file:  vendor/github.com/Shopify/sarama/real_encoder.go
	New file:  vendor/github.com/Shopify/sarama/record.go
	New file:  vendor/github.com/Shopify/sarama/record_batch.go
	New file:  vendor/github.com/Shopify/sarama/records.go
	New file:  vendor/github.com/Shopify/sarama/request.go
	New file:  vendor/github.com/Shopify/sarama/response_header.go
	New file:  vendor/github.com/Shopify/sarama/sarama.go
	New file:  vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
	New file:  vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
	New file:  vendor/github.com/Shopify/sarama/sasl_handshake_request.go
	New file:  vendor/github.com/Shopify/sarama/sasl_handshake_response.go
	New file:  vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go
	New file:  vendor/github.com/Shopify/sarama/sync_group_request.go
	New file:  vendor/github.com/Shopify/sarama/sync_group_response.go
	New file:  vendor/github.com/Shopify/sarama/sync_producer.go
	New file:  vendor/github.com/Shopify/sarama/timestamp.go
	New file:  vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
	New file:  vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
	New file:  vendor/github.com/Shopify/sarama/utils.go
	New file:  vendor/github.com/Shopify/sarama/zstd.go
	New file:  vendor/github.com/eapache/go-resiliency/LICENSE
	New file:  vendor/github.com/eapache/go-resiliency/breaker/README.md
	New file:  vendor/github.com/eapache/go-resiliency/breaker/breaker.go
	New file:  vendor/github.com/eapache/go-xerial-snappy/.gitignore
	New file:  vendor/github.com/eapache/go-xerial-snappy/.travis.yml
	New file:  vendor/github.com/eapache/go-xerial-snappy/LICENSE
	New file:  vendor/github.com/eapache/go-xerial-snappy/README.md
	New file:  vendor/github.com/eapache/go-xerial-snappy/fuzz.go
	New file:  vendor/github.com/eapache/go-xerial-snappy/snappy.go
	New file:  vendor/github.com/eapache/queue/.gitignore
	New file:  vendor/github.com/eapache/queue/.travis.yml
	New file:  vendor/github.com/eapache/queue/LICENSE
	New file:  vendor/github.com/eapache/queue/README.md
	New file:  vendor/github.com/eapache/queue/queue.go
	New file:  vendor/github.com/golang/snappy/.gitignore
	New file:  vendor/github.com/golang/snappy/AUTHORS
	New file:  vendor/github.com/golang/snappy/CONTRIBUTORS
	New file:  vendor/github.com/golang/snappy/LICENSE
	New file:  vendor/github.com/golang/snappy/README
	New file:  vendor/github.com/golang/snappy/decode.go
	New file:  vendor/github.com/golang/snappy/decode_amd64.go
	New file:  vendor/github.com/golang/snappy/decode_amd64.s
	New file:  vendor/github.com/golang/snappy/decode_other.go
	New file:  vendor/github.com/golang/snappy/encode.go
	New file:  vendor/github.com/golang/snappy/encode_amd64.go
	New file:  vendor/github.com/golang/snappy/encode_amd64.s
	New file:  vendor/github.com/golang/snappy/encode_other.go
	New file:  vendor/github.com/golang/snappy/go.mod
	New file:  vendor/github.com/golang/snappy/snappy.go
	New file:  vendor/github.com/hashicorp/go-uuid/.travis.yml
	New file:  vendor/github.com/hashicorp/go-uuid/LICENSE
	New file:  vendor/github.com/hashicorp/go-uuid/README.md
	New file:  vendor/github.com/hashicorp/go-uuid/go.mod
	New file:  vendor/github.com/hashicorp/go-uuid/uuid.go
	New file:  vendor/github.com/jcmturner/gofork/LICENSE
	New file:  vendor/github.com/jcmturner/gofork/encoding/asn1/README.md
	New file:  vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go
	New file:  vendor/github.com/jcmturner/gofork/encoding/asn1/common.go
	New file:  vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go
	New file:  vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go
	New file:  vendor/github.com/klauspost/compress/LICENSE
	New file:  vendor/github.com/klauspost/compress/fse/README.md
	New file:  vendor/github.com/klauspost/compress/fse/bitreader.go
	New file:  vendor/github.com/klauspost/compress/fse/bitwriter.go
	New file:  vendor/github.com/klauspost/compress/fse/bytereader.go
	New file:  vendor/github.com/klauspost/compress/fse/compress.go
	New file:  vendor/github.com/klauspost/compress/fse/decompress.go
	New file:  vendor/github.com/klauspost/compress/fse/fse.go
	New file:  vendor/github.com/klauspost/compress/huff0/.gitignore
	New file:  vendor/github.com/klauspost/compress/huff0/README.md
	New file:  vendor/github.com/klauspost/compress/huff0/bitreader.go
	New file:  vendor/github.com/klauspost/compress/huff0/bitwriter.go
	New file:  vendor/github.com/klauspost/compress/huff0/bytereader.go
	New file:  vendor/github.com/klauspost/compress/huff0/compress.go
	New file:  vendor/github.com/klauspost/compress/huff0/decompress.go
	New file:  vendor/github.com/klauspost/compress/huff0/huff0.go
	New file:  vendor/github.com/klauspost/compress/snappy/.gitignore
	New file:  vendor/github.com/klauspost/compress/snappy/AUTHORS
	New file:  vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
	New file:  vendor/github.com/klauspost/compress/snappy/LICENSE
	New file:  vendor/github.com/klauspost/compress/snappy/README
	New file:  vendor/github.com/klauspost/compress/snappy/decode.go
	New file:  vendor/github.com/klauspost/compress/snappy/decode_amd64.go
	New file:  vendor/github.com/klauspost/compress/snappy/decode_amd64.s
	New file:  vendor/github.com/klauspost/compress/snappy/decode_other.go
	New file:  vendor/github.com/klauspost/compress/snappy/encode.go
	New file:  vendor/github.com/klauspost/compress/snappy/encode_amd64.go
	New file:  vendor/github.com/klauspost/compress/snappy/encode_amd64.s
	New file:  vendor/github.com/klauspost/compress/snappy/encode_other.go
	New file:  vendor/github.com/klauspost/compress/snappy/runbench.cmd
	New file:  vendor/github.com/klauspost/compress/snappy/snappy.go
	New file:  vendor/github.com/klauspost/compress/zstd/README.md
	New file:  vendor/github.com/klauspost/compress/zstd/bitreader.go
	New file:  vendor/github.com/klauspost/compress/zstd/bitwriter.go
	New file:  vendor/github.com/klauspost/compress/zstd/blockdec.go
	New file:  vendor/github.com/klauspost/compress/zstd/blockenc.go
	New file:  vendor/github.com/klauspost/compress/zstd/blocktype_string.go
	New file:  vendor/github.com/klauspost/compress/zstd/bytebuf.go
	New file:  vendor/github.com/klauspost/compress/zstd/bytereader.go
	New file:  vendor/github.com/klauspost/compress/zstd/decoder.go
	New file:  vendor/github.com/klauspost/compress/zstd/decoder_options.go
	New file:  vendor/github.com/klauspost/compress/zstd/enc_dfast.go
	New file:  vendor/github.com/klauspost/compress/zstd/enc_fast.go
	New file:  vendor/github.com/klauspost/compress/zstd/enc_params.go
	New file:  vendor/github.com/klauspost/compress/zstd/encoder.go
	New file:  vendor/github.com/klauspost/compress/zstd/encoder_options.go
	New file:  vendor/github.com/klauspost/compress/zstd/framedec.go
	New file:  vendor/github.com/klauspost/compress/zstd/frameenc.go
	New file:  vendor/github.com/klauspost/compress/zstd/fse_decoder.go
	New file:  vendor/github.com/klauspost/compress/zstd/fse_encoder.go
	New file:  vendor/github.com/klauspost/compress/zstd/fse_predefined.go
	New file:  vendor/github.com/klauspost/compress/zstd/hash.go
	New file:  vendor/github.com/klauspost/compress/zstd/history.go
	New file:  vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
	New file:  vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
	New file:  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
	New file:  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
	New file:  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
	New file:  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
	New file:  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go
	New file:  vendor/github.com/klauspost/compress/zstd/seqdec.go
	New file:  vendor/github.com/klauspost/compress/zstd/seqenc.go
	New file:  vendor/github.com/klauspost/compress/zstd/snappy.go
	New file:  vendor/github.com/klauspost/compress/zstd/zstd.go
	New file:  vendor/github.com/pierrec/lz4/.gitignore
	New file:  vendor/github.com/pierrec/lz4/.travis.yml
	New file:  vendor/github.com/pierrec/lz4/LICENSE
	New file:  vendor/github.com/pierrec/lz4/README.md
	New file:  vendor/github.com/pierrec/lz4/block.go
	New file:  vendor/github.com/pierrec/lz4/debug.go
	New file:  vendor/github.com/pierrec/lz4/debug_stub.go
	New file:  vendor/github.com/pierrec/lz4/decode_amd64.go
	New file:  vendor/github.com/pierrec/lz4/decode_amd64.s
	New file:  vendor/github.com/pierrec/lz4/decode_other.go
	New file:  vendor/github.com/pierrec/lz4/errors.go
	New file:  vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
	New file:  vendor/github.com/pierrec/lz4/lz4.go
	New file:  vendor/github.com/pierrec/lz4/lz4_go1.10.go
	New file:  vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
	New file:  vendor/github.com/pierrec/lz4/reader.go
	New file:  vendor/github.com/pierrec/lz4/writer.go
	New file:  vendor/github.com/rcrowley/go-metrics/.gitignore
	New file:  vendor/github.com/rcrowley/go-metrics/.travis.yml
	New file:  vendor/github.com/rcrowley/go-metrics/LICENSE
	New file:  vendor/github.com/rcrowley/go-metrics/README.md
	New file:  vendor/github.com/rcrowley/go-metrics/counter.go
	New file:  vendor/github.com/rcrowley/go-metrics/debug.go
	New file:  vendor/github.com/rcrowley/go-metrics/ewma.go
	New file:  vendor/github.com/rcrowley/go-metrics/gauge.go
	New file:  vendor/github.com/rcrowley/go-metrics/gauge_float64.go
	New file:  vendor/github.com/rcrowley/go-metrics/graphite.go
	New file:  vendor/github.com/rcrowley/go-metrics/healthcheck.go
	New file:  vendor/github.com/rcrowley/go-metrics/histogram.go
	New file:  vendor/github.com/rcrowley/go-metrics/json.go
	New file:  vendor/github.com/rcrowley/go-metrics/log.go
	New file:  vendor/github.com/rcrowley/go-metrics/memory.md
	New file:  vendor/github.com/rcrowley/go-metrics/meter.go
	New file:  vendor/github.com/rcrowley/go-metrics/metrics.go
	New file:  vendor/github.com/rcrowley/go-metrics/opentsdb.go
	New file:  vendor/github.com/rcrowley/go-metrics/registry.go
	New file:  vendor/github.com/rcrowley/go-metrics/runtime.go
	New file:  vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
	New file:  vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
	New file:  vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
	New file:  vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
	New file:  vendor/github.com/rcrowley/go-metrics/sample.go
	New file:  vendor/github.com/rcrowley/go-metrics/syslog.go
	New file:  vendor/github.com/rcrowley/go-metrics/timer.go
	New file:  vendor/github.com/rcrowley/go-metrics/validate.sh
	New file:  vendor/github.com/rcrowley/go-metrics/writer.go
	Deleted:   vendor/github.com/shirou/gopsutil/mem/types_openbsd.go
	Deleted:   vendor/github.com/shirou/gopsutil/process/types_darwin.go
	Deleted:   vendor/github.com/shirou/gopsutil/process/types_freebsd.go
	Deleted:   vendor/github.com/shirou/gopsutil/process/types_openbsd.go
	Deleted:   vendor/github.com/ugorji/go/codec/xml.go
	New file:  vendor/golang.org/x/crypto/AUTHORS
	New file:  vendor/golang.org/x/crypto/CONTRIBUTORS
	New file:  vendor/golang.org/x/crypto/LICENSE
	New file:  vendor/golang.org/x/crypto/PATENTS
	New file:  vendor/golang.org/x/crypto/md4/md4.go
	New file:  vendor/golang.org/x/crypto/md4/md4block.go
	New file:  vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
	New file:  vendor/golang.org/x/net/AUTHORS
	New file:  vendor/golang.org/x/net/CONTRIBUTORS
	New file:  vendor/golang.org/x/net/LICENSE
	New file:  vendor/golang.org/x/net/PATENTS
	New file:  vendor/golang.org/x/net/internal/socks/client.go
	New file:  vendor/golang.org/x/net/internal/socks/socks.go
	New file:  vendor/golang.org/x/net/proxy/dial.go
	New file:  vendor/golang.org/x/net/proxy/direct.go
	New file:  vendor/golang.org/x/net/proxy/per_host.go
	New file:  vendor/golang.org/x/net/proxy/proxy.go
	New file:  vendor/golang.org/x/net/proxy/socks5.go
	Deleted:   vendor/golang.org/x/sys/unix/mkasm_darwin.go
	Deleted:   vendor/golang.org/x/sys/unix/mkpost.go
	Deleted:   vendor/golang.org/x/sys/unix/mksyscall.go
	Deleted:   vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go
	Deleted:   vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go
	Deleted:   vendor/golang.org/x/sys/unix/mksyscall_solaris.go
	Deleted:   vendor/golang.org/x/sys/unix/mksysctl_openbsd.go
	Deleted:   vendor/golang.org/x/sys/unix/mksysnum.go
	Deleted:   vendor/golang.org/x/sys/unix/types_aix.go
	Deleted:   vendor/golang.org/x/sys/unix/types_darwin.go
	Deleted:   vendor/golang.org/x/sys/unix/types_dragonfly.go
	Deleted:   vendor/golang.org/x/sys/unix/types_freebsd.go
	Deleted:   vendor/golang.org/x/sys/unix/types_netbsd.go
	Deleted:   vendor/golang.org/x/sys/unix/types_openbsd.go
	Deleted:   vendor/golang.org/x/sys/unix/types_solaris.go
	Deleted:   vendor/golang.org/x/text/unicode/norm/maketables.go
	Deleted:   vendor/golang.org/x/text/unicode/norm/triegen.go
	Deleted:   vendor/golang.org/x/tools/go/gcexportdata/main.go
	New file:  vendor/gopkg.in/jcmturner/aescts.v1/.gitignore
	New file:  vendor/gopkg.in/jcmturner/aescts.v1/LICENSE
	New file:  vendor/gopkg.in/jcmturner/aescts.v1/README.md
	New file:  vendor/gopkg.in/jcmturner/aescts.v1/aescts.go
	New file:  vendor/gopkg.in/jcmturner/dnsutils.v1/.gitignore
	New file:  vendor/gopkg.in/jcmturner/dnsutils.v1/.travis.yml
	New file:  vendor/gopkg.in/jcmturner/dnsutils.v1/LICENSE
	New file:  vendor/gopkg.in/jcmturner/dnsutils.v1/srv.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/LICENSE
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/asn1tools/tools.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/client/ASExchange.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/client/TGSExchange.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/client/cache.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/client/client.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/client/network.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/client/passwd.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/client/session.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/client/settings.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/config/error.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/config/hosts.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/config/krb5conf.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/ccache.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/credentials.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes128-cts-hmac-sha1-96.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes128-cts-hmac-sha256-128.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes256-cts-hmac-sha1-96.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes256-cts-hmac-sha384-192.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/common/common.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/crypto.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/des3-cbc-sha1-kd.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/etype/etype.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rc4-hmac.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/encryption.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/keyDerivation.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/nfold.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962/encryption.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962/keyDerivation.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/checksum.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/encryption.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/keyDerivation.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/msgtype.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009/encryption.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009/keyDerivation.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/MICToken.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/README.md
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/contextFlags.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/gssapi.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/wrapToken.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/iana/addrtype/constants.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/iana/adtype/constants.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag/constants.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype/constants.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/iana/constants.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/iana/errorcode/constants.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/iana/etypeID/constants.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/iana/flags/constants.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/iana/keyusage/constants.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/iana/msgtype/constants.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/iana/nametype/constants.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/iana/patype/constants.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/changepasswddata.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/message.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/passwd.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/keytab/keytab.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/krberror/error.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/messages/APRep.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/messages/APReq.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KDCRep.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KDCReq.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBCred.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBError.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBPriv.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBSafe.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/messages/Ticket.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/pac/client_claims.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/pac/client_info.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/pac/credentials_info.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/pac/device_claims.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/pac/device_info.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/pac/kerb_validation_info.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/pac/pac_type.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/pac/s4u_delegation_info.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/pac/signature_data.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/pac/supplemental_cred.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/pac/upn_dns_info.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/types/Authenticator.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/types/AuthorizationData.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/types/Cryptosystem.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/types/HostAddress.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/types/KerberosFlags.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/types/PAData.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/types/PrincipalName.go
	New file:  vendor/gopkg.in/jcmturner/gokrb5.v7/types/TypedData.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/LICENSE
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/mstypes/claims.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/mstypes/common.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/mstypes/filetime.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/mstypes/group_membership.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/mstypes/kerb_sid_and_attributes.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/mstypes/reader.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/mstypes/rpc_unicode_string.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/mstypes/sid.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/mstypes/user_session_key.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/ndr/arrays.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/ndr/decoder.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/ndr/error.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/ndr/header.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/ndr/pipe.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/ndr/primitives.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/ndr/rawbytes.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/ndr/strings.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/ndr/tags.go
	New file:  vendor/gopkg.in/jcmturner/rpc.v1/ndr/union.go
	Modified:  vendor/gopkg.in/yaml.v2/.travis.yml
	Modified:  vendor/gopkg.in/yaml.v2/decode.go
	Modified:  vendor/gopkg.in/yaml.v2/scannerc.go
	Modified:  vendor/gopkg.in/yaml.v2/yaml.go
	Modified:  vendor/gopkg.in/yaml.v2/yamlh.go
	Modified:  vendor/modules.txt

* Update sender.go

* Update sender.go

* Update kafka.go

* Update kafka.go

Co-authored-by: 马涛 <matao@staff.sina.com.cn>
Committed by mt on 2020-06-26 12:07:44 +08:00 (via GitHub)
Parent 7d5d791376, commit 163c116871
445 changed files with 66427 additions and 6730 deletions

etc/transfer.yml

@@ -16,8 +16,12 @@ backend:
  opentsdb:
    enabled: false
    address: "127.0.0.1:4242"
  kafka:
    enabled: false
    brokersPeers: "192.168.1.1:9092,192.168.1.2:9092"
    topic: "n9e"
logger:
  dir: logs/transfer
  level: WARNING
  keepHours: 2
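For reference, the KafkaSection struct added in backend/init.go (shown further below) accepts more keys than this sample enables. A fuller sketch, with illustrative values that are not defaults shipped by this commit:

kafka:
  enabled: true
  brokersPeers: "192.168.1.1:9092,192.168.1.2:9092"
  topic: "n9e"
  saslUser: "user"      # optional; SASL is enabled only when both user and passwd are set
  saslPasswd: "passwd"  # optional
  retry: 3              # maps to sarama's Producer.Retry.Max
  keepAlive: 30000      # in milliseconds, per the time.Millisecond conversion in kafka.go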

go.mod

@@ -3,6 +3,8 @@ module github.com/didi/nightingale
go 1.12

require (
	github.com/Shopify/sarama v1.26.4
	github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
	github.com/cespare/xxhash v1.1.0
	github.com/codegangsta/negroni v1.0.0
	github.com/dgryski/go-tsz v0.0.0-20180227144327-03b7d791f4fe
@@ -10,6 +12,7 @@ require (
	github.com/gin-contrib/pprof v1.2.1
	github.com/gin-contrib/sessions v0.0.3
	github.com/gin-gonic/gin v1.5.0
	github.com/go-ole/go-ole v1.2.4 // indirect
	github.com/go-sql-driver/mysql v1.4.1
	github.com/gorilla/mux v1.6.2
	github.com/hpcloud/tail v1.0.0

go.sum

@@ -9,6 +9,7 @@ cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM=
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
@@ -26,8 +27,14 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/Shopify/sarama v1.19.0 h1:9oksLxC6uxVPHPVYUmq6xhr1BOF/hHobWH2UzO67z1s=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/sarama v1.26.4 h1:+17TxUq/PJEAfZAll0T7XJjSgQWCpaQSoki/x5yN8o8=
github.com/Shopify/sarama v1.26.4/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -68,8 +75,13 @@ github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMa
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dgryski/go-tsz v0.0.0-20180227144327-03b7d791f4fe h1:VOrqop9SqFzqwZpROEOZpIufuLEUoJ3reNhdOdC9Zzw=
github.com/dgryski/go-tsz v0.0.0-20180227144327-03b7d791f4fe/go.mod h1:ft6P746mYUFQBCsH3OkFBG8FtjLx1XclLMo+9Jh1Yts=
github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
@@ -77,6 +89,10 @@ github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZi
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk=
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc=
@@ -100,6 +116,8 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
github.com/go-playground/locales v0.12.1 h1:2FITxuFt/xuCNP1Acdhv62OzaCiviiE4kotfhkmOqEc=
github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
github.com/go-playground/universal-translator v0.16.0 h1:X++omBR/4cE2MNg91AoC3rmGrCjJ8eAeUP/K/EKx4DM=
@@ -127,6 +145,8 @@ github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -136,6 +156,7 @@ github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@@ -160,6 +181,8 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
@@ -177,6 +200,8 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -194,6 +219,8 @@ github.com/kidstuff/mongostore v0.0.0-20181113001930-e650cd85ee4b/go.mod h1:g2nV
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA=
github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -201,6 +228,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -219,6 +248,7 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -247,7 +277,10 @@ github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4 v2.4.1+incompatible h1:mFe7ttWaflA46Mhqh+jUfjp2qTbPYxLB2/OyBppH9dg=
github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
@@ -272,7 +305,10 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/quasoft/memstore v0.0.0-20180925164028-84a050167438/go.mod h1:wTPjTepVu7uJBYgZ0SdWHQlIas582j6cn2jgk4DDdlg=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@@ -322,6 +358,8 @@ github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLY
github.com/unrolled/render v1.0.2 h1:dGS3EmChQP3yOi1YeFNO/Dx+MbWZhdvhQJTXochM5bs=
github.com/unrolled/render v1.0.2/go.mod h1:gN9T0NhL4Bfbwu8ann7Ry/TGHYfosul+J0obPf6NBdM=
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
@@ -345,7 +383,10 @@ golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529 h1:iMGN4xG0cnqj3t+zOM8wUB0BiPKHEwSxEZCvzcbZuvk=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w=
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -391,6 +432,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -460,6 +503,7 @@ golang.org/x/tools v0.0.0-20200108203644-89082a384178 h1:f5gMxb6FbpY48csegk9UPd7
golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
@@ -511,6 +555,8 @@ gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUy
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -521,6 +567,16 @@ gopkg.in/go-playground/validator.v9 v9.29.1 h1:SvGtYmN60a5CVKTOzMSyfzWDeZRxRuGvR
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg=
gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/ldap.v3 v3.1.0 h1:DIDWEjI7vQWREh0S8X5/NFPCZ3MCVd55LmXKPW4XLGE=
gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
@@ -531,6 +587,8 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

src/modules/transfer/backend/init.go

@@ -36,6 +36,16 @@ type OpenTsdbSection struct {
	Address string `yaml:"address"`
}

type KafkaSection struct {
	Enabled      bool   `yaml:"enabled"`
	Topic        string `yaml:"topic"`
	BrokersPeers string `yaml:"brokersPeers"`
	SaslUser     string `yaml:"saslUser"`
	SaslPasswd   string `yaml:"saslPasswd"`
	Retry        int    `yaml:"retry"`
	KeepAlive    int64  `yaml:"keepAlive"`
}

type BackendSection struct {
	Enabled bool `yaml:"enabled"`
	Batch   int  `yaml:"batch"`
@@ -53,6 +63,7 @@ type BackendSection struct {
	ClusterList map[string]*ClusterNode `json:"clusterList"`
	Influxdb    InfluxdbSection         `yaml:"influxdb"`
	OpenTsdb    OpenTsdbSection         `yaml:"opentsdb"`
	Kafka       KafkaSection            `yaml:"kafka"`
}

const DefaultSendQueueMaxSize = 102400 // 102400 entries, i.e. 10.24w ("w" = 万 = 10,000)

@@ -71,6 +82,7 @@ var (
	JudgeQueues   = cache.SafeJudgeQueue{}
	InfluxdbQueue *list.SafeListLimited
	OpenTsdbQueue *list.SafeListLimited
	KafkaQueue    = make(chan KafkaData, 10)

	// connection pool: node_address -> connection_pool
	TsdbConnPools *pools.ConnPools

src/modules/transfer/backend/kafka.go (new file)

@@ -0,0 +1,119 @@
package backend

import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/Shopify/sarama"
	"github.com/toolkits/pkg/logger"
)

type KafkaData map[string]interface{}

type KfClient struct {
	producer sarama.AsyncProducer
	cfg      *sarama.Config

	Topic        string
	BrokersPeers []string

	ticker *time.Ticker
}

func NewKfClient(c KafkaSection) (kafkaSender *KfClient, err error) {
	topic := c.Topic
	if len(topic) == 0 {
		err = errors.New("topic is empty")
		return
	}
	brokers := strings.Split(c.BrokersPeers, ",")
	if len(brokers) == 0 {
		err = errors.New("brokers is empty")
		return
	}
	hostName, _ := os.Hostname()

	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true
	cfg.Producer.Return.Errors = true
	if len(hostName) > 0 {
		cfg.ClientID = hostName
	}
	cfg.Producer.Partitioner = sarama.NewRoundRobinPartitioner
	if len(c.SaslUser) > 0 && len(c.SaslPasswd) > 0 {
		cfg.Net.SASL.Enable = true
		cfg.Net.SASL.User = c.SaslUser
		cfg.Net.SASL.Password = c.SaslPasswd
	}
	if c.Retry > 0 {
		cfg.Producer.Retry.Max = c.Retry
	}
	cfg.Net.DialTimeout = time.Duration(connTimeout) * time.Millisecond
	if c.KeepAlive > 0 {
		cfg.Net.KeepAlive = time.Duration(c.KeepAlive) * time.Millisecond
	}
	producer, err := sarama.NewAsyncProducer(brokers, cfg)
	if err != nil {
		return
	}
	kafkaSender = newSender(brokers, topic, cfg, producer)
	return
}

func newSender(brokers []string, topic string, cfg *sarama.Config, producer sarama.AsyncProducer) (kf *KfClient) {
	kf = &KfClient{
		producer:     producer,
		cfg:          cfg,
		Topic:        topic,
		BrokersPeers: brokers,
		ticker:       time.NewTicker(time.Millisecond * time.Duration(callTimeout)),
	}
	go kf.readMessageToErrorChan()
	return
}

// readMessageToErrorChan drains the producer's Successes and Errors channels
// (both are enabled in NewKfClient); failed deliveries are logged.
func (kf *KfClient) readMessageToErrorChan() {
	var producer = kf.producer
	for {
		select {
		case <-producer.Successes():
		case errMsg := <-producer.Errors():
			msg, _ := errMsg.Msg.Value.Encode()
			logger.Errorf("ReadMessageToErrorChan err:%v %v", errMsg.Error(), string(msg))
		}
	}
}

func (kf *KfClient) Send(data KafkaData) error {
	var producer = kf.producer
	message, err := kf.getEventMessage(data)
	if err != nil {
		logger.Errorf("Dropping event: %v", err)
		return err
	}
	select {
	case producer.Input() <- message:
	case <-kf.ticker.C:
		// the shared ticker acts as a coarse send timeout (callTimeout ms)
		return fmt.Errorf("send kafka failed: %v[%v]", kf.Topic, kf.BrokersPeers)
	}
	return nil
}

func (kf *KfClient) Close() error {
	logger.Infof("kafka sender(%s)[%v] was closed", kf.Topic, kf.BrokersPeers)
	_ = kf.producer.Close()
	kf.producer = nil
	return nil
}

func (kf *KfClient) getEventMessage(event map[string]interface{}) (pm *sarama.ProducerMessage, err error) {
	value, err := json.Marshal(event)
	if err != nil {
		return
	}
	pm = &sarama.ProducerMessage{
		Topic: kf.Topic,
		Value: sarama.StringEncoder(string(value)),
	}
	return
}
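Taken together, the client is constructed once and then fed KafkaData maps. A minimal usage sketch (a fragment, assuming the backend package's connTimeout/callTimeout settings are loaded; the section values are illustrative, not from the commit):

	sec := KafkaSection{
		Enabled:      true,
		Topic:        "n9e",
		BrokersPeers: "192.168.1.1:9092,192.168.1.2:9092",
	}
	kf, err := NewKfClient(sec)
	if err != nil {
		logger.Errorf("init kafka client fail: %v", err)
		return
	}
	defer kf.Close()

	// Each map is JSON-encoded by getEventMessage and produced asynchronously.
	if err := kf.Send(KafkaData{"metric": "cpu.idle", "value": 93.5}); err != nil {
		logger.Errorf("send fail: %v", err)
	}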

src/modules/transfer/backend/sender.go

@@ -74,6 +74,9 @@ func startSendTasks() {
	}

	if Config.Kafka.Enabled {
		go send2KafkaTask()
	}
}

func Send2TsdbTask(Q *list.SafeListLimited, node, addr string, concurrent int) {
@@ -495,3 +498,38 @@ func convert2OpenTsdbItem(d *dataobj.MetricValue) *dataobj.OpenTsdbItem {
	t.Value = d.Value
	return &t
}

func Push2KafkaSendQueue(items []*dataobj.MetricValue) {
	for _, item := range items {
		KafkaQueue <- convert2KafkaItem(item)
	}
}

func convert2KafkaItem(d *dataobj.MetricValue) KafkaData {
	m := make(KafkaData)
	m["metric"] = d.Metric
	m["value"] = d.Value
	m["timestamp"] = d.Timestamp
	m["step"] = d.Step
	m["endpoint"] = d.Endpoint
	m["tags"] = d.Tags
	return m
}

func send2KafkaTask() {
	kf, err := NewKfClient(Config.Kafka)
	if err != nil {
		logger.Errorf("init kafka client fail: %v", err)
		return
	}
	defer kf.Close()

	for {
		kafkaItem := <-KafkaQueue
		stats.Counter.Set("points.out.kafka", 1)

		err = kf.Send(kafkaItem)
		if err != nil {
			stats.Counter.Set("points.out.kafka.err", 1)
			logger.Errorf("send %v to kafka %s fail: %v", kafkaItem, Config.Kafka.BrokersPeers, err)
		}
	}
}
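Two things are worth noting about this loop. First, KafkaQueue is a buffered channel of capacity 10 (see backend/init.go above), so Push2KafkaSendQueue blocks its callers whenever send2KafkaTask falls behind by more than ten items. Second, each dequeued KafkaData map is JSON-encoded before being produced; a record built by convert2KafkaItem would serialize roughly as follows (field names from the code, values illustrative):

	{"endpoint":"host01","metric":"cpu.idle","tags":"core=0","timestamp":1593144000,"step":10,"value":93.5}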

src/modules/transfer/http/routes/push_router.go

@@ -48,6 +48,18 @@ func PushData(c *gin.Context) {
		backend.Push2JudgeSendQueue(metricValues)
	}

	if backend.Config.Influxdb.Enabled {
		backend.Push2InfluxdbSendQueue(metricValues)
	}

	if backend.Config.OpenTsdb.Enabled {
		backend.Push2OpenTsdbSendQueue(metricValues)
	}

	if backend.Config.Kafka.Enabled {
		backend.Push2KafkaSendQueue(metricValues)
	}

	if msg != "" {
		render.Message(c, msg)
		return

src/modules/transfer/rpc/push.go

@@ -52,6 +52,9 @@ func (t *Transfer) Push(args []*dataobj.MetricValue, reply *dataobj.TransferResp
		backend.Push2OpenTsdbSendQueue(items)
	}

	if backend.Config.Kafka.Enabled {
		backend.Push2KafkaSendQueue(items)
	}

	if reply.Invalid == 0 {
		reply.Msg = "ok"
	}

vendor/github.com/Shopify/sarama/.gitignore (generated, vendored, new file)

@@ -0,0 +1,27 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
*.test
# Folders
_obj
_test
.vagrant
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
coverage.txt
profile.out

vendor/github.com/Shopify/sarama/.golangci.yml (generated, vendored, new file)

@@ -0,0 +1,74 @@
run:
  timeout: 5m
  deadline: 10m

linters-settings:
  govet:
    check-shadowing: false
  golint:
    min-confidence: 0
  gocyclo:
    min-complexity: 99
  maligned:
    suggest-new: true
  dupl:
    threshold: 100
  goconst:
    min-len: 2
    min-occurrences: 3
  misspell:
    locale: US
  goimports:
    local-prefixes: github.com/Shopify/sarama
  gocritic:
    enabled-tags:
      - diagnostic
      - experimental
      - opinionated
      - performance
      - style
    disabled-checks:
      - wrapperFunc
      - ifElseChain
  funlen:
    lines: 300
    statements: 300

linters:
  disable-all: true
  enable:
    - bodyclose
    - deadcode
    - depguard
    - dogsled
    # - dupl
    - errcheck
    - funlen
    # - gocritic
    - gocyclo
    - gofmt
    - goimports
    # - golint
    - gosec
    # - gosimple
    - govet
    # - ineffassign
    - interfacer
    # - misspell
    # - nakedret
    # - scopelint
    # - staticcheck
    - structcheck
    # - stylecheck
    - typecheck
    - unconvert
    - unused
    - varcheck
    - whitespace
    # - goconst
    # - gochecknoinits

issues:
  exclude:
    - consider giving a name to these results
    - include an explanation for nolint directive
vendor/github.com/Shopify/sarama/CHANGELOG.md (generated, vendored, new file)

@@ -0,0 +1,912 @@
# Changelog
#### Unreleased
#### Version 1.26.1 (2020-02-04)
Improvements:
- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539))
- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595))
- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573))
- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592))
Bug Fixes:
- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590))
- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589))
#### Version 1.26.0 (2020-01-24)
New Features:
- Enable zstd compression
([1574](https://github.com/Shopify/sarama/pull/1574),
[1582](https://github.com/Shopify/sarama/pull/1582))
- Support headers in tools kafka-console-producer
([1549](https://github.com/Shopify/sarama/pull/1549))
Improvements:
- Add SASL AuthIdentity to SASL frames (authzid)
([1585](https://github.com/Shopify/sarama/pull/1585)).
Bug Fixes:
- Sending messages with ZStd compression enabled fails in multiple ways
([1252](https://github.com/Shopify/sarama/issues/1252)).
- Use the broker for any admin on BrokerConfig
([1571](https://github.com/Shopify/sarama/pull/1571)).
- Set DescribeConfigRequest Version field
([1576](https://github.com/Shopify/sarama/pull/1576)).
- ConsumerGroup flooding logs with client/metadata update req
([1578](https://github.com/Shopify/sarama/pull/1578)).
- MetadataRequest version in DescribeCluster
([1580](https://github.com/Shopify/sarama/pull/1580)).
- Fix deadlock in consumer group handleError
([1581](https://github.com/Shopify/sarama/pull/1581))
- Fill in the Fetch{Request,Response} protocol
([1582](https://github.com/Shopify/sarama/pull/1582)).
- Retry topic request on ControllerNotAvailable
([1586](https://github.com/Shopify/sarama/pull/1586)).
#### Version 1.25.0 (2020-01-13)
New Features:
- Support TLS protocol in kafka-producer-performance
([1538](https://github.com/Shopify/sarama/pull/1538)).
- Add support for kafka 2.4.0
([1552](https://github.com/Shopify/sarama/pull/1552)).
Improvements:
- Allow the Consumer to disable auto-commit offsets
([1164](https://github.com/Shopify/sarama/pull/1164)).
- Produce records with consistent timestamps
([1455](https://github.com/Shopify/sarama/pull/1455)).
Bug Fixes:
- Fix incorrect SetTopicMetadata name mentions
([1534](https://github.com/Shopify/sarama/pull/1534)).
- Fix client.tryRefreshMetadata Println
([1535](https://github.com/Shopify/sarama/pull/1535)).
- Fix panic on calling updateMetadata on closed client
([1531](https://github.com/Shopify/sarama/pull/1531)).
- Fix possible faulty metrics in TestFuncProducing
([1545](https://github.com/Shopify/sarama/pull/1545)).
#### Version 1.24.1 (2019-10-31)
New Features:
- Add DescribeLogDirs Request/Response pair
([1520](https://github.com/Shopify/sarama/pull/1520)).
Bug Fixes:
- Fix ClusterAdmin returning invalid controller ID on DescribeCluster
([1518](https://github.com/Shopify/sarama/pull/1518)).
- Fix issue with consumergroup not rebalancing when new partition is added
([1525](https://github.com/Shopify/sarama/pull/1525)).
- Ensure consistent use of read/write deadlines
([1529](https://github.com/Shopify/sarama/pull/1529)).
#### Version 1.24.0 (2019-10-09)
New Features:
- Add sticky partition assignor
([1416](https://github.com/Shopify/sarama/pull/1416)).
- Switch from cgo zstd package to pure Go implementation
([1477](https://github.com/Shopify/sarama/pull/1477)).
Improvements:
- Allow creating ClusterAdmin from client
([1415](https://github.com/Shopify/sarama/pull/1415)).
- Set KafkaVersion in ListAcls method
([1452](https://github.com/Shopify/sarama/pull/1452)).
- Set request version in CreateACL ClusterAdmin method
([1458](https://github.com/Shopify/sarama/pull/1458)).
- Set request version in DeleteACL ClusterAdmin method
([1461](https://github.com/Shopify/sarama/pull/1461)).
- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest
([1464](https://github.com/Shopify/sarama/pull/1464)).
- Remove direct usage of gofork
([1465](https://github.com/Shopify/sarama/pull/1465)).
- Add support for Go 1.13
([1478](https://github.com/Shopify/sarama/pull/1478)).
- Improve behavior of NewMockListAclsResponse
([1481](https://github.com/Shopify/sarama/pull/1481)).
Bug Fixes:
- Fix race condition in consumergroup example
([1434](https://github.com/Shopify/sarama/pull/1434)).
- Fix brokerProducer goroutine leak
([1442](https://github.com/Shopify/sarama/pull/1442)).
- Use released version of lz4 library
([1469](https://github.com/Shopify/sarama/pull/1469)).
- Set correct version in MockDeleteTopicsResponse
([1484](https://github.com/Shopify/sarama/pull/1484)).
- Fix CLI help message typo
([1494](https://github.com/Shopify/sarama/pull/1494)).
Known Issues:
- Please **don't** use Zstd, as it doesn't work right now.
See https://github.com/Shopify/sarama/issues/1252
#### Version 1.23.1 (2019-07-22)
Bug Fixes:
- Fix fetch delete record bug
([1425](https://github.com/Shopify/sarama/pull/1425)).
- Handle SASL/OAUTHBEARER token rejection
([1428](https://github.com/Shopify/sarama/pull/1428)).
#### Version 1.23.0 (2019-07-02)
New Features:
- Add support for Kafka 2.3.0
([1418](https://github.com/Shopify/sarama/pull/1418)).
- Add support for ListConsumerGroupOffsets v2
([1374](https://github.com/Shopify/sarama/pull/1374)).
- Add support for DeleteConsumerGroup
([1417](https://github.com/Shopify/sarama/pull/1417)).
- Add support for SASLVersion configuration
([1410](https://github.com/Shopify/sarama/pull/1410)).
- Add kerberos support
([1366](https://github.com/Shopify/sarama/pull/1366)).
Improvements:
- Improve sasl_scram_client example
([1406](https://github.com/Shopify/sarama/pull/1406)).
- Fix shutdown and race-condition in consumer-group example
([1404](https://github.com/Shopify/sarama/pull/1404)).
- Add support for error codes 77-81
([1397](https://github.com/Shopify/sarama/pull/1397)).
- Pool internal objects allocated per message
([1385](https://github.com/Shopify/sarama/pull/1385)).
- Reduce packet decoder allocations
([1373](https://github.com/Shopify/sarama/pull/1373)).
- Support timeout when fetching metadata
([1359](https://github.com/Shopify/sarama/pull/1359)).
Bug Fixes:
- Fix fetch size integer overflow
([1376](https://github.com/Shopify/sarama/pull/1376)).
- Handle and log throttled FetchResponses
([1383](https://github.com/Shopify/sarama/pull/1383)).
- Refactor misspelled word Resouce to Resource
([1368](https://github.com/Shopify/sarama/pull/1368)).
#### Version 1.22.1 (2019-04-29)
Improvements:
- Use zstd 1.3.8
([1350](https://github.com/Shopify/sarama/pull/1350)).
- Add support for SaslHandshakeRequest v1
([1354](https://github.com/Shopify/sarama/pull/1354)).
Bug Fixes:
- Fix V5 MetadataRequest nullable topics array
([1353](https://github.com/Shopify/sarama/pull/1353)).
- Use a different SCRAM client for each broker connection
([1349](https://github.com/Shopify/sarama/pull/1349)).
- Fix AllowAutoTopicCreation for MetadataRequest greater than v3
([1344](https://github.com/Shopify/sarama/pull/1344)).
#### Version 1.22.0 (2019-04-09)
New Features:
- Add Offline Replicas Operation to Client
([1318](https://github.com/Shopify/sarama/pull/1318)).
- Allow using proxy when connecting to broker
([1326](https://github.com/Shopify/sarama/pull/1326)).
- Implement ReadCommitted
([1307](https://github.com/Shopify/sarama/pull/1307)).
- Add support for Kafka 2.2.0
([1331](https://github.com/Shopify/sarama/pull/1331)).
- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms
([1295](https://github.com/Shopify/sarama/pull/1295)).
Improvements:
- Unregister all broker metrics on broker stop
([1232](https://github.com/Shopify/sarama/pull/1232)).
- Add SCRAM authentication example
([1303](https://github.com/Shopify/sarama/pull/1303)).
- Add consumergroup examples
([1304](https://github.com/Shopify/sarama/pull/1304)).
- Expose consumer batch size metric
([1296](https://github.com/Shopify/sarama/pull/1296)).
- Add TLS options to console producer and consumer
([1300](https://github.com/Shopify/sarama/pull/1300)).
- Reduce client close bookkeeping
([1297](https://github.com/Shopify/sarama/pull/1297)).
- Satisfy error interface in create responses
([1154](https://github.com/Shopify/sarama/pull/1154)).
- Please lint gods
([1346](https://github.com/Shopify/sarama/pull/1346)).
Bug Fixes:
- Fix multi consumer group instance crash
([1338](https://github.com/Shopify/sarama/pull/1338)).
- Update lz4 to latest version
([1347](https://github.com/Shopify/sarama/pull/1347)).
- Retry ErrNotCoordinatorForConsumer in new consumergroup session
([1231](https://github.com/Shopify/sarama/pull/1231)).
- Fix cleanup error handler
([1332](https://github.com/Shopify/sarama/pull/1332)).
- Fix race condition in PartitionConsumer
([1156](https://github.com/Shopify/sarama/pull/1156)).
#### Version 1.21.0 (2019-02-24)
New Features:
- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest
([1236](https://github.com/Shopify/sarama/pull/1236)).
- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests
([1178](https://github.com/Shopify/sarama/pull/1178)).
- Implement SASL/OAUTHBEARER
([1240](https://github.com/Shopify/sarama/pull/1240)).
Improvements:
- Add Go mod support
([1282](https://github.com/Shopify/sarama/pull/1282)).
- Add error codes 73-76
([1239](https://github.com/Shopify/sarama/pull/1239)).
- Add retry backoff function
([1160](https://github.com/Shopify/sarama/pull/1160)).
- Maintain metadata in the producer even when retries are disabled
([1189](https://github.com/Shopify/sarama/pull/1189)).
- Include ReplicaAssignment in ListTopics
([1274](https://github.com/Shopify/sarama/pull/1274)).
- Add producer performance tool
([1222](https://github.com/Shopify/sarama/pull/1222)).
- Add support LogAppend timestamps
([1258](https://github.com/Shopify/sarama/pull/1258)).
Bug Fixes:
- Fix potential deadlock when a heartbeat request fails
([1286](https://github.com/Shopify/sarama/pull/1286)).
- Fix consuming compacted topic
([1227](https://github.com/Shopify/sarama/pull/1227)).
- Set correct Kafka version for DescribeConfigsRequest v1
([1277](https://github.com/Shopify/sarama/pull/1277)).
- Update kafka test version
([1273](https://github.com/Shopify/sarama/pull/1273)).
#### Version 1.20.1 (2019-01-10)
New Features:
- Add optional replica id in offset request
([1100](https://github.com/Shopify/sarama/pull/1100)).
Improvements:
- Implement DescribeConfigs Request + Response v1 & v2
([1230](https://github.com/Shopify/sarama/pull/1230)).
- Reuse compression objects
([1185](https://github.com/Shopify/sarama/pull/1185)).
- Switch from png to svg for GoDoc link in README
([1243](https://github.com/Shopify/sarama/pull/1243)).
- Fix typo in deprecation notice for FetchResponseBlock.Records
([1242](https://github.com/Shopify/sarama/pull/1242)).
- Fix typos in consumer metadata response file
([1244](https://github.com/Shopify/sarama/pull/1244)).
Bug Fixes:
- Revert to individual msg retries for non-idempotent
([1203](https://github.com/Shopify/sarama/pull/1203)).
- Respect MaxMessageBytes limit for uncompressed messages
([1141](https://github.com/Shopify/sarama/pull/1141)).
#### Version 1.20.0 (2018-12-10)
New Features:
- Add support for zstd compression
([#1170](https://github.com/Shopify/sarama/pull/1170)).
- Add support for Idempotent Producer
([#1152](https://github.com/Shopify/sarama/pull/1152)).
- Add support for Kafka 2.1.0
([#1229](https://github.com/Shopify/sarama/pull/1229)).
- Add support for OffsetCommit request/response pairs versions v1 to v5
([#1201](https://github.com/Shopify/sarama/pull/1201)).
- Add support for OffsetFetch request/response pair up to version v5
([#1198](https://github.com/Shopify/sarama/pull/1198)).
Improvements:
- Export broker's Rack setting
([#1173](https://github.com/Shopify/sarama/pull/1173)).
- Always use latest patch version of Go on CI
([#1202](https://github.com/Shopify/sarama/pull/1202)).
- Add error codes 61 to 72
([#1195](https://github.com/Shopify/sarama/pull/1195)).
Bug Fixes:
- Fix build without cgo
([#1182](https://github.com/Shopify/sarama/pull/1182)).
- Fix go vet suggestion in consumer group file
([#1209](https://github.com/Shopify/sarama/pull/1209)).
- Fix typos in code and comments
([#1228](https://github.com/Shopify/sarama/pull/1228)).
#### Version 1.19.0 (2018-09-27)
New Features:
- Implement a higher-level consumer group
([#1099](https://github.com/Shopify/sarama/pull/1099)).
Improvements:
- Add support for Go 1.11
([#1176](https://github.com/Shopify/sarama/pull/1176)).
Bug Fixes:
- Fix encoding of `MetadataResponse` with version 2 and higher
([#1174](https://github.com/Shopify/sarama/pull/1174)).
- Fix race condition in mock async producer
([#1174](https://github.com/Shopify/sarama/pull/1174)).
#### Version 1.18.0 (2018-09-07)
New Features:
- Make `Partitioner.RequiresConsistency` vary per-message
([#1112](https://github.com/Shopify/sarama/pull/1112)).
- Add customizable partitioner
([#1118](https://github.com/Shopify/sarama/pull/1118)).
- Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
`DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL`
([#1055](https://github.com/Shopify/sarama/pull/1055)).
Improvements:
- Add support for Kafka 2.0.0
([#1149](https://github.com/Shopify/sarama/pull/1149)).
- Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
([#1123](https://github.com/Shopify/sarama/pull/1123)).
- Simpler offset management
([#1127](https://github.com/Shopify/sarama/pull/1127)).
Bug Fixes:
- Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
([#1110](https://github.com/Shopify/sarama/pull/1110)).
- Fix consumer block when response did not contain all the
expected topic/partition blocks
([#1086](https://github.com/Shopify/sarama/pull/1086)).
- Fix consumer block when response contains only control messages
([#1115](https://github.com/Shopify/sarama/pull/1115)).
- Add timeout config for ClusterAdmin requests
([#1142](https://github.com/Shopify/sarama/pull/1142)).
- Add version check when producing message with headers
([#1117](https://github.com/Shopify/sarama/pull/1117)).
- Fix `MetadataRequest` for empty list of topics
([#1132](https://github.com/Shopify/sarama/pull/1132)).
- Fix producer topic metadata on-demand fetch when topic error happens in metadata response
([#1125](https://github.com/Shopify/sarama/pull/1125)).
#### Version 1.17.0 (2018-05-30)
New Features:
- Add support for gzip compression levels
([#1044](https://github.com/Shopify/sarama/pull/1044)).
- Add support for Metadata request/response pairs versions v1 to v5
([#1047](https://github.com/Shopify/sarama/pull/1047),
[#1069](https://github.com/Shopify/sarama/pull/1069)).
- Add versioning to JoinGroup request/response pairs
([#1098](https://github.com/Shopify/sarama/pull/1098))
- Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs
([#1065](https://github.com/Shopify/sarama/pull/1065),
[#1096](https://github.com/Shopify/sarama/pull/1096),
[#1027](https://github.com/Shopify/sarama/pull/1027)).
- Add `Controller()` method to Client interface
([#1063](https://github.com/Shopify/sarama/pull/1063)).
Improvements:
- ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp
([#1010](https://github.com/Shopify/sarama/pull/1010)).
- Expose missing protocol parts: `msgSet` and `recordBatch`
([#1049](https://github.com/Shopify/sarama/pull/1049)).
- Add support for v1 DeleteTopics Request
([#1052](https://github.com/Shopify/sarama/pull/1052)).
- Add support for Go 1.10
([#1064](https://github.com/Shopify/sarama/pull/1064)).
- Claim support for Kafka 1.1.0
([#1073](https://github.com/Shopify/sarama/pull/1073)).
Bug Fixes:
- Fix FindCoordinatorResponse.encode to allow nil Coordinator
([#1050](https://github.com/Shopify/sarama/pull/1050),
[#1051](https://github.com/Shopify/sarama/pull/1051)).
- Clear all metadata when we have the latest topic info
([#1033](https://github.com/Shopify/sarama/pull/1033)).
- Make `PartitionConsumer.Close` idempotent
([#1092](https://github.com/Shopify/sarama/pull/1092)).
#### Version 1.16.0 (2018-02-12)
New Features:
- Add support for the Create/Delete Topics request/response pairs
([#1007](https://github.com/Shopify/sarama/pull/1007),
[#1008](https://github.com/Shopify/sarama/pull/1008)).
- Add support for the Describe/Create/Delete ACL request/response pairs
([#1009](https://github.com/Shopify/sarama/pull/1009)).
- Add support for the five transaction-related request/response pairs
([#1016](https://github.com/Shopify/sarama/pull/1016)).
Improvements:
- Permit setting version on mock producer responses
([#999](https://github.com/Shopify/sarama/pull/999)).
- Add `NewMockBrokerListener` helper for testing TLS connections
([#1019](https://github.com/Shopify/sarama/pull/1019)).
- Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
which results in much higher throughput in most cases
([#1024](https://github.com/Shopify/sarama/pull/1024)).
- Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
reduce CPU and memory usage when processing many partitions
([#1028](https://github.com/Shopify/sarama/pull/1028)).
- Assign relative offsets to messages in the producer to save the brokers a
recompression pass
([#1002](https://github.com/Shopify/sarama/pull/1002),
[#1015](https://github.com/Shopify/sarama/pull/1015)).
Bug Fixes:
- Fix producing uncompressed batches with the new protocol format
([#1032](https://github.com/Shopify/sarama/issues/1032)).
- Fix consuming compacted topics with the new protocol format
([#1005](https://github.com/Shopify/sarama/issues/1005)).
- Fix consuming topics with a mix of protocol formats
([#1021](https://github.com/Shopify/sarama/issues/1021)).
- Fix consuming when the broker includes multiple batches in a single response
([#1022](https://github.com/Shopify/sarama/issues/1022)).
- Fix detection of `PartialTrailingMessage` when the partial message was
truncated before the magic value indicating its version
([#1030](https://github.com/Shopify/sarama/pull/1030)).
- Fix expectation-checking in the mock of `SyncProducer.SendMessages`
([#1035](https://github.com/Shopify/sarama/pull/1035)).
#### Version 1.15.0 (2017-12-08)
New Features:
- Claim official support for Kafka 1.0, though it did already work
([#984](https://github.com/Shopify/sarama/pull/984)).
- Helper methods for Kafka version numbers to/from strings
([#989](https://github.com/Shopify/sarama/pull/989)).
- Implement CreatePartitions request/response
([#985](https://github.com/Shopify/sarama/pull/985)).
Improvements:
- Add error codes 45-60
([#986](https://github.com/Shopify/sarama/issues/986)).
Bug Fixes:
- Fix slow consuming for certain Kafka 0.11/1.0 configurations
([#982](https://github.com/Shopify/sarama/pull/982)).
- Correctly determine when a FetchResponse contains the new message format
([#990](https://github.com/Shopify/sarama/pull/990)).
- Fix producing with multiple headers
([#996](https://github.com/Shopify/sarama/pull/996)).
- Fix handling of truncated record batches
([#998](https://github.com/Shopify/sarama/pull/998)).
- Fix leaking metrics when closing brokers
([#991](https://github.com/Shopify/sarama/pull/991)).
#### Version 1.14.0 (2017-11-13)
New Features:
- Add support for the new Kafka 0.11 record-batch format, including the wire
protocol and the necessary behavioural changes in the producer and consumer.
Transactions and idempotency are not yet supported, but producing and
consuming should work with all the existing bells and whistles (batching,
compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
of Arista Networks for this work. Part of
([#901](https://github.com/Shopify/sarama/issues/901)).
Bug Fixes:
- Fix encoding of ProduceResponse versions in test
([#970](https://github.com/Shopify/sarama/pull/970)).
- Return partial replicas list when we have it
([#975](https://github.com/Shopify/sarama/pull/975)).
#### Version 1.13.0 (2017-10-04)
New Features:
- Support for FetchRequest version 3
([#905](https://github.com/Shopify/sarama/pull/905)).
- Permit setting version on mock FetchResponses
([#939](https://github.com/Shopify/sarama/pull/939)).
- Add a configuration option to support storing only minimal metadata for
extremely large clusters
([#937](https://github.com/Shopify/sarama/pull/937)).
- Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
([#932](https://github.com/Shopify/sarama/pull/932)).
Improvements:
- Provide the block-level timestamp when consuming compressed messages
([#885](https://github.com/Shopify/sarama/issues/885)).
- `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
by the broker, which can be meaningful
([#930](https://github.com/Shopify/sarama/pull/930)).
- Use a `Ticker` to reduce consumer timer overhead at the cost of higher
variance in the actual timeout
([#933](https://github.com/Shopify/sarama/pull/933)).
Bug Fixes:
- Gracefully handle messages with negative timestamps
([#907](https://github.com/Shopify/sarama/pull/907)).
- Raise a proper error when encountering an unknown message version
([#940](https://github.com/Shopify/sarama/pull/940)).
#### Version 1.12.0 (2017-05-08)
New Features:
- Added support for the `ApiVersions` request and response pair, and Kafka
version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
that you still need to specify the Kafka version in the Sarama configuration
for the time being.
- Added a `Brokers` method to the Client which returns the complete set of
active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
- Added an `InSyncReplicas` method to the Client which returns the set of all
in-sync broker IDs for the given partition, now that the Kafka versions for
which this was misleading are no longer in our supported set
([#872](https://github.com/Shopify/sarama/pull/872)).
- Added a `NewCustomHashPartitioner` method which allows constructing a hash
partitioner with a custom hash method in case the default (FNV-1a) is not
suitable
([#837](https://github.com/Shopify/sarama/pull/837),
[#841](https://github.com/Shopify/sarama/pull/841)).
Improvements:
- Recognize more Kafka error codes
([#859](https://github.com/Shopify/sarama/pull/859)).
Bug Fixes:
- Fix an issue where decoding a malformed FetchRequest would not return the
correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
- Respect ordering of group protocols in JoinGroupRequests. This fix is
transparent if you're using the `AddGroupProtocol` or
`AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
([#812](https://github.com/Shopify/sarama/issues/812)).
- Fix an alignment-related issue with atomics on 32-bit architectures
([#859](https://github.com/Shopify/sarama/pull/859)).
#### Version 1.11.0 (2016-12-20)
_Important:_ As of Sarama 1.11 it is necessary to set the config value of
`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
versions would silently override this value when instantiating a SyncProducer
which led to unexpected values and data races.
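As a concrete illustration of this note (a sketch, not part of the upstream changelog; the broker address is a placeholder):

cfg := sarama.NewConfig()
cfg.Producer.Return.Successes = true // required for SyncProducer as of 1.11
producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)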
New Features:
- Metrics! Thanks to Sébastien Launay for all his work on this feature
([#701](https://github.com/Shopify/sarama/pull/701),
[#746](https://github.com/Shopify/sarama/pull/746),
[#766](https://github.com/Shopify/sarama/pull/766)).
- Add support for LZ4 compression
([#786](https://github.com/Shopify/sarama/pull/786)).
- Add support for ListOffsetRequest v1 and Kafka 0.10.1
([#775](https://github.com/Shopify/sarama/pull/775)).
- Added a `HighWaterMarks` method to the Consumer which aggregates the
`HighWaterMarkOffset` values of its child topic/partitions
([#769](https://github.com/Shopify/sarama/pull/769)).
Bug Fixes:
- Fixed producing when using timestamps, compression and Kafka 0.10
([#759](https://github.com/Shopify/sarama/pull/759)).
- Added missing decoder methods to DescribeGroups response
([#756](https://github.com/Shopify/sarama/pull/756)).
- Fix producer shutdown when `Return.Errors` is disabled
([#787](https://github.com/Shopify/sarama/pull/787)).
- Don't mutate configuration in SyncProducer
([#790](https://github.com/Shopify/sarama/pull/790)).
- Fix crash on SASL initialization failure
([#795](https://github.com/Shopify/sarama/pull/795)).
#### Version 1.10.1 (2016-08-30)
Bug Fixes:
- Fix the documentation for `HashPartitioner` which was incorrect
([#717](https://github.com/Shopify/sarama/pull/717)).
- Permit client creation even when it is limited by ACLs
([#722](https://github.com/Shopify/sarama/pull/722)).
- Several fixes to the consumer timer optimization code, regressions introduced
in v1.10.0. Go's timers are finicky
([#730](https://github.com/Shopify/sarama/pull/730),
[#733](https://github.com/Shopify/sarama/pull/733),
[#734](https://github.com/Shopify/sarama/pull/734)).
- Handle consuming compressed relative offsets with Kafka 0.10
([#735](https://github.com/Shopify/sarama/pull/735)).
#### Version 1.10.0 (2016-08-02)
_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
Kafka you are running against (via the `config.Version` value) in order to use
features that may not be compatible with old Kafka versions. If you don't
specify this value it will default to 0.8.2 (the minimum supported), and trying
to use more recent features (like the offset manager) will fail with an error.
_Also:_ The offset-manager's behaviour has been changed to match the upstream
java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
offset-manager, please ensure that you are committing one *greater* than the
last consumed message offset or else you may end up consuming duplicate
messages.
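A short sketch of both points (not part of the upstream changelog; pom and msg are assumed to come from an existing PartitionOffsetManager and a consumed message):

cfg := sarama.NewConfig()
cfg.Version = sarama.V0_10_0_0 // must be set to use features newer than 0.8.2
// Commit one *greater* than the last consumed offset, as the Java consumer does:
pom.MarkOffset(msg.Offset+1, "")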
New Features:
- Support for Kafka 0.10
([#672](https://github.com/Shopify/sarama/pull/672),
[#678](https://github.com/Shopify/sarama/pull/678),
[#681](https://github.com/Shopify/sarama/pull/681), and others).
- Support for configuring the target Kafka version
([#676](https://github.com/Shopify/sarama/pull/676)).
- Batch producing support in the SyncProducer
([#677](https://github.com/Shopify/sarama/pull/677)).
- Extend producer mock to allow setting expectations on message contents
([#667](https://github.com/Shopify/sarama/pull/667)).
Improvements:
- Support `nil` compressed messages for deleting in compacted topics
([#634](https://github.com/Shopify/sarama/pull/634)).
- Pre-allocate decoding errors, greatly reducing heap usage and GC time against
misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
- Re-use consumer expiry timers, removing one allocation per consumed message
([#707](https://github.com/Shopify/sarama/pull/707)).
Bug Fixes:
- Actually default the client ID to "sarama" like we say we do
([#664](https://github.com/Shopify/sarama/pull/664)).
- Fix a rare issue where `Client.Leader` could return the wrong error
([#685](https://github.com/Shopify/sarama/pull/685)).
- Fix a possible tight loop in the consumer
([#693](https://github.com/Shopify/sarama/pull/693)).
- Match upstream's offset-tracking behaviour
([#705](https://github.com/Shopify/sarama/pull/705)).
- Report UnknownTopicOrPartition errors from the offset manager
([#706](https://github.com/Shopify/sarama/pull/706)).
- Fix possible negative partition value from the HashPartitioner
([#709](https://github.com/Shopify/sarama/pull/709)).
#### Version 1.9.0 (2016-05-16)
New Features:
- Add support for custom offset manager retention durations
([#602](https://github.com/Shopify/sarama/pull/602)).
- Publish low-level mocks to enable testing of third-party producer/consumer
implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
- Declare support for Golang 1.6
([#611](https://github.com/Shopify/sarama/pull/611)).
- Support for SASL plain-text auth
([#648](https://github.com/Shopify/sarama/pull/648)).
Improvements:
- Simplified broker locking scheme slightly
([#604](https://github.com/Shopify/sarama/pull/604)).
- Documentation cleanup
([#605](https://github.com/Shopify/sarama/pull/605),
[#621](https://github.com/Shopify/sarama/pull/621),
[#654](https://github.com/Shopify/sarama/pull/654)).
Bug Fixes:
- Fix race condition shutting down the OffsetManager
([#658](https://github.com/Shopify/sarama/pull/658)).
#### Version 1.8.0 (2016-02-01)
New Features:
- Full support for Kafka 0.9:
- All protocol messages and fields
([#586](https://github.com/Shopify/sarama/pull/586),
[#588](https://github.com/Shopify/sarama/pull/588),
[#590](https://github.com/Shopify/sarama/pull/590)).
- Verified that TLS support works
([#581](https://github.com/Shopify/sarama/pull/581)).
- Fixed the OffsetManager compatibility
([#585](https://github.com/Shopify/sarama/pull/585)).
Improvements:
- Optimize for fewer system calls when reading from the network
([#584](https://github.com/Shopify/sarama/pull/584)).
- Automatically retry `InvalidMessage` errors to match upstream behaviour
([#589](https://github.com/Shopify/sarama/pull/589)).
#### Version 1.7.0 (2015-12-11)
New Features:
- Preliminary support for Kafka 0.9
([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
caveats:
- Protocol-layer support is mostly in place
([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9
renamed some messages and fields, which we did not rename, in order to preserve API
compatibility.
- The producer and consumer work against 0.9, but the offset manager does
not ([#573](https://github.com/Shopify/sarama/pull/573)).
- TLS support may or may not work
([#581](https://github.com/Shopify/sarama/pull/581)).
Improvements:
- Don't wait for request timeouts on dead brokers, greatly speeding recovery
when the TCP connection is left hanging
([#548](https://github.com/Shopify/sarama/pull/548)).
- Refactored part of the producer. The new version provides a much more elegant
solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
slightly more efficient, and much more precise in calculating batch sizes
when compression is used
([#549](https://github.com/Shopify/sarama/pull/549),
[#550](https://github.com/Shopify/sarama/pull/550),
[#551](https://github.com/Shopify/sarama/pull/551)).
Bug Fixes:
- Fix race condition in consumer test mock
([#553](https://github.com/Shopify/sarama/pull/553)).
#### Version 1.6.1 (2015-09-25)
Bug Fixes:
- Fix panic that could occur if a user-supplied message value failed to encode
([#449](https://github.com/Shopify/sarama/pull/449)).
#### Version 1.6.0 (2015-09-04)
New Features:
- Implementation of a consumer offset manager using the APIs introduced in
Kafka 0.8.2. The API is designed mainly for integration into a future
high-level consumer, not for direct use, although it is *possible* to use it
directly.
([#461](https://github.com/Shopify/sarama/pull/461)).
Improvements:
- CRC32 calculation is much faster on machines with SSE4.2 instructions,
removing a major hotspot from most profiles
([#255](https://github.com/Shopify/sarama/pull/255)).
Bug Fixes:
- Make protocol decoding more robust against some malformed packets generated
by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
[#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
([#528](https://github.com/Shopify/sarama/pull/528)).
- Fix a potential race condition panic in the consumer on shutdown
([#529](https://github.com/Shopify/sarama/pull/529)).
#### Version 1.5.0 (2015-08-17)
New Features:
- TLS-encrypted network connections are now supported. This feature is subject
to change when Kafka releases built-in TLS support, but for now this is
enough to work with TLS-terminating proxies
([#154](https://github.com/Shopify/sarama/pull/154)).
Improvements:
- The consumer will not block if a single partition is not drained by the user;
all other partitions will continue to consume normally
([#485](https://github.com/Shopify/sarama/pull/485)).
- Formatting of error strings has been much improved
([#495](https://github.com/Shopify/sarama/pull/495)).
- Internal refactoring of the producer for code cleanliness and to enable
future work ([#300](https://github.com/Shopify/sarama/pull/300)).
Bug Fixes:
- Fix a potential deadlock in the consumer on shutdown
([#475](https://github.com/Shopify/sarama/pull/475)).
#### Version 1.4.3 (2015-07-21)
Bug Fixes:
- Don't include the partitioner in the producer's "fetch partitions"
circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
- Don't retry messages until the broker is closed when abandoning a broker in
the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
- Update the import path for snappy-go, it has moved again and the API has
changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
#### Version 1.4.2 (2015-05-27)
Bug Fixes:
- Update the import path for snappy-go, it has moved from google code to github
([#456](https://github.com/Shopify/sarama/pull/456)).
#### Version 1.4.1 (2015-05-25)
Improvements:
- Optimizations when decoding snappy messages, thanks to John Potocny
([#446](https://github.com/Shopify/sarama/pull/446)).
Bug Fixes:
- Fix hypothetical race conditions on producer shutdown
([#450](https://github.com/Shopify/sarama/pull/450),
[#451](https://github.com/Shopify/sarama/pull/451)).
#### Version 1.4.0 (2015-05-01)
New Features:
- The consumer now implements `Topics()` and `Partitions()` methods to enable
users to dynamically choose what topics/partitions to consume without
instantiating a full client
([#431](https://github.com/Shopify/sarama/pull/431)).
- The partition-consumer now exposes the high water mark offset value returned
by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
- Added a `kafka-console-consumer` tool capable of handling multiple
partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
([#439](https://github.com/Shopify/sarama/pull/439),
[#442](https://github.com/Shopify/sarama/pull/442)).
Improvements:
- The producer's logging during retry scenarios is more consistent, more
useful, and slightly less verbose
([#429](https://github.com/Shopify/sarama/pull/429)).
- The client now shuffles its initial list of seed brokers in order to prevent
thundering herd on the first broker in the list
([#441](https://github.com/Shopify/sarama/pull/441)).
Bug Fixes:
- The producer now correctly manages its state if retries occur when it is
shutting down, fixing several instances of confusing behaviour and at least
one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
- The consumer now handles messages for different partitions asynchronously,
making it much more resilient to specific user code ordering
([#325](https://github.com/Shopify/sarama/pull/325)).
#### Version 1.3.0 (2015-04-16)
New Features:
- The client now tracks consumer group coordinators using
ConsumerMetadataRequests similar to how it tracks partition leadership using
regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
This adds two methods to the client API:
- `Coordinator(consumerGroup string) (*Broker, error)`
- `RefreshCoordinator(consumerGroup string) error`
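For illustration (a hedged sketch, not part of the upstream changelog; client is an existing sarama.Client and the group name is a placeholder):

coordinator, err := client.Coordinator("example-group")
if err != nil {
	return err
}
fmt.Println("group coordinator:", coordinator.Addr())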
Improvements:
- ConsumerMetadataResponses now automatically create a Broker object out of the
ID/address/port combination for the Coordinator; accessing the fields
individually has been deprecated
([#413](https://github.com/Shopify/sarama/pull/413)).
- Much improved handling of `OffsetOutOfRange` errors in the consumer.
Consumers will fail to start if the provided offset is out of range
([#418](https://github.com/Shopify/sarama/pull/418))
and they will automatically shut down if the offset falls out of range
([#424](https://github.com/Shopify/sarama/pull/424)).
- Small performance improvement in encoding and decoding protocol messages
([#427](https://github.com/Shopify/sarama/pull/427)).
Bug Fixes:
- Fix a rare race condition in the client's background metadata refresher if
it happens to be activated while the client is being closed
([#422](https://github.com/Shopify/sarama/pull/422)).
#### Version 1.2.0 (2015-04-07)
Improvements:
- The producer's behaviour when `Flush.Frequency` is set is now more intuitive
([#389](https://github.com/Shopify/sarama/pull/389)).
- The producer is now somewhat more memory-efficient during and after retrying
messages due to an improved queue implementation
([#396](https://github.com/Shopify/sarama/pull/396)).
- The consumer produces much more useful logging output when leadership
changes ([#385](https://github.com/Shopify/sarama/pull/385)).
- The client's `GetOffset` method will now automatically refresh metadata and
retry once in the event of stale information or similar
([#394](https://github.com/Shopify/sarama/pull/394)).
- Broker connections now have support for using TCP keepalives
([#407](https://github.com/Shopify/sarama/issues/407)).
Bug Fixes:
- The OffsetCommitRequest message now correctly implements all three possible
API versions ([#390](https://github.com/Shopify/sarama/pull/390),
[#400](https://github.com/Shopify/sarama/pull/400)).
#### Version 1.1.0 (2015-03-20)
Improvements:
- Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
broken topics don't choke throughput
([#373](https://github.com/Shopify/sarama/pull/373)).
Bug Fixes:
- Fix the producer's internal reference counting in certain unusual scenarios
([#367](https://github.com/Shopify/sarama/pull/367)).
- Fix the consumer's internal reference counting in certain unusual scenarios
([#369](https://github.com/Shopify/sarama/pull/369)).
- Fix a condition where the producer's internal control messages could have
gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
- Fix an issue where invalid partition lists would be cached when asking for
metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
#### Version 1.0.0 (2015-03-17)
Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
- All the configuration values have been unified in the `Config` struct.
- Much improved test suite.

vendor/github.com/Shopify/sarama/LICENSE generated vendored Normal file

@@ -0,0 +1,20 @@
Copyright (c) 2013 Shopify
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/Shopify/sarama/Makefile generated vendored Normal file

@@ -0,0 +1,27 @@
default: fmt get update test lint
GO := GO111MODULE=on GOPRIVATE=github.com/linkedin GOSUMDB=off go
GOBUILD := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG)
GOTEST := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 6m -coverprofile=profile.out -covermode=atomic
FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go')
TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go')
get:
$(GO) get ./...
$(GO) mod verify
$(GO) mod tidy
update:
$(GO) get -u -v all
$(GO) mod verify
$(GO) mod tidy
fmt:
gofmt -s -l -w $(FILES) $(TESTS)
lint:
golangci-lint run
test:
$(GOTEST) ./...

vendor/github.com/Shopify/sarama/README.md generated vendored Normal file

@@ -0,0 +1,36 @@
# sarama
[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.svg)](https://godoc.org/github.com/Shopify/sarama)
[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama)
Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
## Getting started
- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
- Mocks for testing are available in the [mocks](./mocks) subpackage.
- The [examples](./examples) directory contains more elaborate example applications.
- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
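A minimal end-to-end sketch (the broker address and topic are placeholders; see the godoc examples above for authoritative usage):

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // the SyncProducer requires this

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("message stored at partition %d, offset %d", partition, offset)
}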
## Compatibility and API stability
Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
the two latest stable releases of Kafka and Go, and we provide a two month
grace period for older releases. This means we currently officially support
Go 1.12 through 1.14, and Kafka 2.1 through 2.4, although older releases are
still likely to work.
Sarama follows semantic versioning and provides API stability via the gopkg.in service.
You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
A changelog is available [here](CHANGELOG.md).
## Contributing
- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details.
- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information.
- For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
- If you have any questions, just ask!

vendor/github.com/Shopify/sarama/Vagrantfile generated vendored Normal file

@@ -0,0 +1,14 @@
# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
MEMORY = 3072
Vagrant.configure("2") do |config|
config.vm.box = "ubuntu/bionic64"
config.vm.provision :shell, path: "vagrant/provision.sh"
config.vm.network "private_network", ip: "192.168.100.67"
config.vm.provider "virtualbox" do |v|
v.memory = MEMORY
end
end

vendor/github.com/Shopify/sarama/acl_bindings.go generated vendored Normal file

@@ -0,0 +1,138 @@
package sarama
//Resource holds information about acl resource type
type Resource struct {
ResourceType AclResourceType
ResourceName string
ResourcePatternType AclResourcePatternType
}
func (r *Resource) encode(pe packetEncoder, version int16) error {
pe.putInt8(int8(r.ResourceType))
if err := pe.putString(r.ResourceName); err != nil {
return err
}
if version == 1 {
if r.ResourcePatternType == AclPatternUnknown {
Logger.Print("Cannot encode an unknown resource pattern type, using Literal instead")
r.ResourcePatternType = AclPatternLiteral
}
pe.putInt8(int8(r.ResourcePatternType))
}
return nil
}
func (r *Resource) decode(pd packetDecoder, version int16) (err error) {
resourceType, err := pd.getInt8()
if err != nil {
return err
}
r.ResourceType = AclResourceType(resourceType)
if r.ResourceName, err = pd.getString(); err != nil {
return err
}
if version == 1 {
pattern, err := pd.getInt8()
if err != nil {
return err
}
r.ResourcePatternType = AclResourcePatternType(pattern)
}
return nil
}
//Acl holds information about acl type
type Acl struct {
Principal string
Host string
Operation AclOperation
PermissionType AclPermissionType
}
func (a *Acl) encode(pe packetEncoder) error {
if err := pe.putString(a.Principal); err != nil {
return err
}
if err := pe.putString(a.Host); err != nil {
return err
}
pe.putInt8(int8(a.Operation))
pe.putInt8(int8(a.PermissionType))
return nil
}
func (a *Acl) decode(pd packetDecoder, version int16) (err error) {
if a.Principal, err = pd.getString(); err != nil {
return err
}
if a.Host, err = pd.getString(); err != nil {
return err
}
operation, err := pd.getInt8()
if err != nil {
return err
}
a.Operation = AclOperation(operation)
permissionType, err := pd.getInt8()
if err != nil {
return err
}
a.PermissionType = AclPermissionType(permissionType)
return nil
}
//ResourceAcls pairs a Resource with its Acls
type ResourceAcls struct {
Resource
Acls []*Acl
}
func (r *ResourceAcls) encode(pe packetEncoder, version int16) error {
if err := r.Resource.encode(pe, version); err != nil {
return err
}
if err := pe.putArrayLength(len(r.Acls)); err != nil {
return err
}
for _, acl := range r.Acls {
if err := acl.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *ResourceAcls) decode(pd packetDecoder, version int16) error {
if err := r.Resource.decode(pd, version); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Acls = make([]*Acl, n)
for i := 0; i < n; i++ {
r.Acls[i] = new(Acl)
if err := r.Acls[i].decode(pd, version); err != nil {
return err
}
}
return nil
}

vendor/github.com/Shopify/sarama/acl_create_request.go generated vendored Normal file

@@ -0,0 +1,89 @@
package sarama
//CreateAclsRequest is an acl creation request
type CreateAclsRequest struct {
Version int16
AclCreations []*AclCreation
}
func (c *CreateAclsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
return err
}
for _, aclCreation := range c.AclCreations {
if err := aclCreation.encode(pe, c.Version); err != nil {
return err
}
}
return nil
}
func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
c.Version = version
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.AclCreations = make([]*AclCreation, n)
for i := 0; i < n; i++ {
c.AclCreations[i] = new(AclCreation)
if err := c.AclCreations[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (c *CreateAclsRequest) key() int16 {
return 30
}
func (c *CreateAclsRequest) version() int16 {
return c.Version
}
func (c *CreateAclsRequest) headerVersion() int16 {
return 1
}
func (c *CreateAclsRequest) requiredVersion() KafkaVersion {
switch c.Version {
case 1:
return V2_0_0_0
default:
return V0_11_0_0
}
}
//AclCreation is a wrapper around Resource and Acl type
type AclCreation struct {
Resource
Acl
}
func (a *AclCreation) encode(pe packetEncoder, version int16) error {
if err := a.Resource.encode(pe, version); err != nil {
return err
}
if err := a.Acl.encode(pe); err != nil {
return err
}
return nil
}
func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
if err := a.Resource.decode(pd, version); err != nil {
return err
}
if err := a.Acl.decode(pd, version); err != nil {
return err
}
return nil
}

vendor/github.com/Shopify/sarama/acl_create_response.go generated vendored Normal file

@@ -0,0 +1,94 @@
package sarama
import "time"
//CreateAclsResponse is an acl creation response type
type CreateAclsResponse struct {
ThrottleTime time.Duration
AclCreationResponses []*AclCreationResponse
}
func (c *CreateAclsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
return err
}
for _, aclCreationResponse := range c.AclCreationResponses {
if err := aclCreationResponse.encode(pe); err != nil {
return err
}
}
return nil
}
func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.AclCreationResponses = make([]*AclCreationResponse, n)
for i := 0; i < n; i++ {
c.AclCreationResponses[i] = new(AclCreationResponse)
if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (c *CreateAclsResponse) key() int16 {
return 30
}
func (c *CreateAclsResponse) version() int16 {
return 0
}
func (c *CreateAclsResponse) headerVersion() int16 {
return 0
}
func (c *CreateAclsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
//AclCreationResponse is an acl creation response type
type AclCreationResponse struct {
Err KError
ErrMsg *string
}
func (a *AclCreationResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(a.Err))
if err := pe.putNullableString(a.ErrMsg); err != nil {
return err
}
return nil
}
func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
a.Err = KError(kerr)
if a.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
return nil
}

vendor/github.com/Shopify/sarama/acl_delete_request.go generated vendored Normal file

@@ -0,0 +1,62 @@
package sarama
//DeleteAclsRequest is a delete acl request
type DeleteAclsRequest struct {
Version int
Filters []*AclFilter
}
func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(d.Filters)); err != nil {
return err
}
for _, filter := range d.Filters {
filter.Version = d.Version
if err := filter.encode(pe); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
d.Version = int(version)
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.Filters = make([]*AclFilter, n)
for i := 0; i < n; i++ {
d.Filters[i] = new(AclFilter)
d.Filters[i].Version = int(version)
if err := d.Filters[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsRequest) key() int16 {
return 31
}
func (d *DeleteAclsRequest) version() int16 {
return int16(d.Version)
}
func (d *DeleteAclsRequest) headerVersion() int16 {
return 1
}
func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
switch d.Version {
case 1:
return V2_0_0_0
default:
return V0_11_0_0
}
}

vendor/github.com/Shopify/sarama/acl_delete_response.go generated vendored Normal file

@@ -0,0 +1,163 @@
package sarama
import "time"
//DeleteAclsResponse is a delete acl response
type DeleteAclsResponse struct {
Version int16
ThrottleTime time.Duration
FilterResponses []*FilterResponse
}
func (d *DeleteAclsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(d.FilterResponses)); err != nil {
return err
}
for _, filterResponse := range d.FilterResponses {
if err := filterResponse.encode(pe, d.Version); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.FilterResponses = make([]*FilterResponse, n)
for i := 0; i < n; i++ {
d.FilterResponses[i] = new(FilterResponse)
if err := d.FilterResponses[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsResponse) key() int16 {
return 31
}
func (d *DeleteAclsResponse) version() int16 {
return d.Version
}
func (d *DeleteAclsResponse) headerVersion() int16 {
return 0
}
func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
//FilterResponse is a filter response type
type FilterResponse struct {
Err KError
ErrMsg *string
MatchingAcls []*MatchingAcl
}
func (f *FilterResponse) encode(pe packetEncoder, version int16) error {
pe.putInt16(int16(f.Err))
if err := pe.putNullableString(f.ErrMsg); err != nil {
return err
}
if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
return err
}
for _, matchingAcl := range f.MatchingAcls {
if err := matchingAcl.encode(pe, version); err != nil {
return err
}
}
return nil
}
func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
f.Err = KError(kerr)
if f.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
f.MatchingAcls = make([]*MatchingAcl, n)
for i := 0; i < n; i++ {
f.MatchingAcls[i] = new(MatchingAcl)
if err := f.MatchingAcls[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
//MatchingAcl is a matching acl type
type MatchingAcl struct {
Err KError
ErrMsg *string
Resource
Acl
}
func (m *MatchingAcl) encode(pe packetEncoder, version int16) error {
pe.putInt16(int16(m.Err))
if err := pe.putNullableString(m.ErrMsg); err != nil {
return err
}
if err := m.Resource.encode(pe, version); err != nil {
return err
}
if err := m.Acl.encode(pe); err != nil {
return err
}
return nil
}
func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
m.Err = KError(kerr)
if m.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
if err := m.Resource.decode(pd, version); err != nil {
return err
}
if err := m.Acl.decode(pd, version); err != nil {
return err
}
return nil
}

vendor/github.com/Shopify/sarama/acl_describe_request.go generated vendored Normal file

@@ -0,0 +1,39 @@
package sarama
//DescribeAclsRequest is a describe acl request type
type DescribeAclsRequest struct {
Version int
AclFilter
}
func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
d.AclFilter.Version = d.Version
return d.AclFilter.encode(pe)
}
func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
d.Version = int(version)
d.AclFilter.Version = int(version)
return d.AclFilter.decode(pd, version)
}
func (d *DescribeAclsRequest) key() int16 {
return 29
}
func (d *DescribeAclsRequest) version() int16 {
return int16(d.Version)
}
func (d *DescribeAclsRequest) headerVersion() int16 {
return 1
}
func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
switch d.Version {
case 1:
return V2_0_0_0
default:
return V0_11_0_0
}
}

vendor/github.com/Shopify/sarama/acl_describe_response.go generated vendored Normal file

@@ -0,0 +1,91 @@
package sarama
import "time"
//DescribeAclsResponse is a describe acl response type
type DescribeAclsResponse struct {
Version int16
ThrottleTime time.Duration
Err KError
ErrMsg *string
ResourceAcls []*ResourceAcls
}
func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
pe.putInt16(int16(d.Err))
if err := pe.putNullableString(d.ErrMsg); err != nil {
return err
}
if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
return err
}
for _, resourceAcl := range d.ResourceAcls {
if err := resourceAcl.encode(pe, d.Version); err != nil {
return err
}
}
return nil
}
func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
d.Err = KError(kerr)
errmsg, err := pd.getString()
if err != nil {
return err
}
if errmsg != "" {
d.ErrMsg = &errmsg
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.ResourceAcls = make([]*ResourceAcls, n)
for i := 0; i < n; i++ {
d.ResourceAcls[i] = new(ResourceAcls)
if err := d.ResourceAcls[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *DescribeAclsResponse) key() int16 {
return 29
}
func (d *DescribeAclsResponse) version() int16 {
return d.Version
}
func (d *DescribeAclsResponse) headerVersion() int16 {
return 0
}
func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
switch d.Version {
case 1:
return V2_0_0_0
default:
return V0_11_0_0
}
}

vendor/github.com/Shopify/sarama/acl_filter.go generated vendored Normal file

@@ -0,0 +1,78 @@
package sarama
type AclFilter struct {
Version int
ResourceType AclResourceType
ResourceName *string
ResourcePatternTypeFilter AclResourcePatternType
Principal *string
Host *string
Operation AclOperation
PermissionType AclPermissionType
}
func (a *AclFilter) encode(pe packetEncoder) error {
pe.putInt8(int8(a.ResourceType))
if err := pe.putNullableString(a.ResourceName); err != nil {
return err
}
if a.Version == 1 {
pe.putInt8(int8(a.ResourcePatternTypeFilter))
}
if err := pe.putNullableString(a.Principal); err != nil {
return err
}
if err := pe.putNullableString(a.Host); err != nil {
return err
}
pe.putInt8(int8(a.Operation))
pe.putInt8(int8(a.PermissionType))
return nil
}
func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) {
resourceType, err := pd.getInt8()
if err != nil {
return err
}
a.ResourceType = AclResourceType(resourceType)
if a.ResourceName, err = pd.getNullableString(); err != nil {
return err
}
if a.Version == 1 {
pattern, err := pd.getInt8()
if err != nil {
return err
}
a.ResourcePatternTypeFilter = AclResourcePatternType(pattern)
}
if a.Principal, err = pd.getNullableString(); err != nil {
return err
}
if a.Host, err = pd.getNullableString(); err != nil {
return err
}
operation, err := pd.getInt8()
if err != nil {
return err
}
a.Operation = AclOperation(operation)
permissionType, err := pd.getInt8()
if err != nil {
return err
}
a.PermissionType = AclPermissionType(permissionType)
return nil
}

vendor/github.com/Shopify/sarama/acl_types.go generated vendored Normal file
@@ -0,0 +1,55 @@
package sarama
type (
AclOperation int
AclPermissionType int
AclResourceType int
AclResourcePatternType int
)
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
const (
AclOperationUnknown AclOperation = iota
AclOperationAny
AclOperationAll
AclOperationRead
AclOperationWrite
AclOperationCreate
AclOperationDelete
AclOperationAlter
AclOperationDescribe
AclOperationClusterAction
AclOperationDescribeConfigs
AclOperationAlterConfigs
AclOperationIdempotentWrite
)
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
const (
AclPermissionUnknown AclPermissionType = iota
AclPermissionAny
AclPermissionDeny
AclPermissionAllow
)
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
const (
AclResourceUnknown AclResourceType = iota
AclResourceAny
AclResourceTopic
AclResourceGroup
AclResourceCluster
AclResourceTransactionalID
)
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
const (
AclPatternUnknown AclResourcePatternType = iota
AclPatternAny
AclPatternMatch
AclPatternLiteral
AclPatternPrefixed
)
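
Usage note: these enums and AclFilter are what a caller feeds into ClusterAdmin.ListAcls (defined later in admin.go in this diff). A minimal sketch, assuming a reachable broker at the placeholder address localhost:9092, that lists every ACL bound to any topic:

package main

import (
    "log"

    "github.com/Shopify/sarama"
)

func main() {
    conf := sarama.NewConfig()
    conf.Version = sarama.V2_0_0_0 // makes ListAcls send the v1 DescribeAcls request

    admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, conf)
    if err != nil {
        log.Fatal(err)
    }
    defer admin.Close()

    // Match ACLs on any topic, for any principal, host, operation and permission.
    filter := sarama.AclFilter{
        ResourceType:              sarama.AclResourceTopic,
        ResourcePatternTypeFilter: sarama.AclPatternAny,
        Operation:                 sarama.AclOperationAny,
        PermissionType:            sarama.AclPermissionAny,
    }

    acls, err := admin.ListAcls(filter)
    if err != nil {
        log.Fatal(err)
    }
    for _, resourceAcls := range acls {
        log.Printf("%+v", resourceAcls)
    }
}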

@@ -0,0 +1,57 @@
package sarama
//AddOffsetsToTxnRequest is the request payload for adding offsets to a transaction
type AddOffsetsToTxnRequest struct {
TransactionalID string
ProducerID int64
ProducerEpoch int16
GroupID string
}
func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
if err := pe.putString(a.TransactionalID); err != nil {
return err
}
pe.putInt64(a.ProducerID)
pe.putInt16(a.ProducerEpoch)
if err := pe.putString(a.GroupID); err != nil {
return err
}
return nil
}
func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
if a.TransactionalID, err = pd.getString(); err != nil {
return err
}
if a.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if a.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
if a.GroupID, err = pd.getString(); err != nil {
return err
}
return nil
}
func (a *AddOffsetsToTxnRequest) key() int16 {
return 25
}
func (a *AddOffsetsToTxnRequest) version() int16 {
return 0
}
func (a *AddOffsetsToTxnRequest) headerVersion() int16 {
return 1
}
func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

@@ -0,0 +1,49 @@
package sarama
import (
"time"
)
//AddOffsetsToTxnResponse is a response type for adding offsets to txns
type AddOffsetsToTxnResponse struct {
ThrottleTime time.Duration
Err KError
}
func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
pe.putInt16(int16(a.Err))
return nil
}
func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
a.Err = KError(kerr)
return nil
}
func (a *AddOffsetsToTxnResponse) key() int16 {
return 25
}
func (a *AddOffsetsToTxnResponse) version() int16 {
return 0
}
func (a *AddOffsetsToTxnResponse) headerVersion() int16 {
return 0
}
func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

@@ -0,0 +1,81 @@
package sarama
//AddPartitionsToTxnRequest is the request payload for adding partitions to a transaction
type AddPartitionsToTxnRequest struct {
TransactionalID string
ProducerID int64
ProducerEpoch int16
TopicPartitions map[string][]int32
}
func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
if err := pe.putString(a.TransactionalID); err != nil {
return err
}
pe.putInt64(a.ProducerID)
pe.putInt16(a.ProducerEpoch)
if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
return err
}
for topic, partitions := range a.TopicPartitions {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putInt32Array(partitions); err != nil {
return err
}
}
return nil
}
func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
if a.TransactionalID, err = pd.getString(); err != nil {
return err
}
if a.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if a.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
a.TopicPartitions = make(map[string][]int32)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitions, err := pd.getInt32Array()
if err != nil {
return err
}
a.TopicPartitions[topic] = partitions
}
return nil
}
func (a *AddPartitionsToTxnRequest) key() int16 {
return 24
}
func (a *AddPartitionsToTxnRequest) version() int16 {
return 0
}
func (a *AddPartitionsToTxnRequest) headerVersion() int16 {
return 1
}
func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

@@ -0,0 +1,114 @@
package sarama
import (
"time"
)
//AddPartitionsToTxnResponse carries the per-partition errors for an AddPartitionsToTxnRequest
type AddPartitionsToTxnResponse struct {
ThrottleTime time.Duration
Errors map[string][]*PartitionError
}
func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(a.Errors)); err != nil {
return err
}
for topic, e := range a.Errors {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(e)); err != nil {
return err
}
for _, partitionError := range e {
if err := partitionError.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
a.Errors = make(map[string][]*PartitionError)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
m, err := pd.getArrayLength()
if err != nil {
return err
}
a.Errors[topic] = make([]*PartitionError, m)
for j := 0; j < m; j++ {
a.Errors[topic][j] = new(PartitionError)
if err := a.Errors[topic][j].decode(pd, version); err != nil {
return err
}
}
}
return nil
}
func (a *AddPartitionsToTxnResponse) key() int16 {
return 24
}
func (a *AddPartitionsToTxnResponse) version() int16 {
return 0
}
func (a *AddPartitionsToTxnResponse) headerVersion() int16 {
return 0
}
func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
//PartitionError is a partition error type
type PartitionError struct {
Partition int32
Err KError
}
func (p *PartitionError) encode(pe packetEncoder) error {
pe.putInt32(p.Partition)
pe.putInt16(int16(p.Err))
return nil
}
func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
if p.Partition, err = pd.getInt32(); err != nil {
return err
}
kerr, err := pd.getInt16()
if err != nil {
return err
}
p.Err = KError(kerr)
return nil
}
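
Usage note: the Errors map in AddPartitionsToTxnResponse is keyed by topic, with one PartitionError per affected partition. A hedged sketch of how a caller issuing the request through the broker API might surface the first failure; checkTxnPartitions is a hypothetical helper, not part of this diff:

package txnutil

import (
    "fmt"

    "github.com/Shopify/sarama"
)

// checkTxnPartitions walks the per-topic, per-partition errors in an
// AddPartitionsToTxnResponse and returns the first non-nil one.
func checkTxnPartitions(resp *sarama.AddPartitionsToTxnResponse) error {
    for topic, partitionErrs := range resp.Errors {
        for _, pe := range partitionErrs {
            if pe.Err != sarama.ErrNoError {
                return fmt.Errorf("topic %s, partition %d: %w", topic, pe.Partition, pe.Err)
            }
        }
    }
    return nil
}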

vendor/github.com/Shopify/sarama/admin.go generated vendored Normal file
@@ -0,0 +1,934 @@
package sarama
import (
"errors"
"fmt"
"math/rand"
"strconv"
"sync"
"time"
)
// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0.
// Methods with stricter requirements will specify the minimum broker version required.
// You MUST call Close() on a client to avoid leaks
type ClusterAdmin interface {
// Creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher.
// It may take several seconds after CreateTopic returns success for all the brokers
// to become aware that the topic has been created. During this time, listTopics
// may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0.
CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error
// List the topics available in the cluster with the default options.
ListTopics() (map[string]TopicDetail, error)
// Describe some topics in the cluster.
DescribeTopics(topics []string) (metadata []*TopicMetadata, err error)
// Delete a topic. It may take several seconds after DeleteTopic returns success
// for all the brokers to become aware that the topic is gone.
// During this time, listTopics may continue to return information about the deleted topic.
// If delete.topic.enable is false on the brokers, deleteTopic will mark
// the topic for deletion, but not actually delete it.
// This operation is supported by brokers with version 0.10.1.0 or higher.
DeleteTopic(topic string) error
// Increase the number of partitions of the topics according to the corresponding values.
// If partitions are increased for a topic that has a key, the partition logic or ordering of
// the messages will be affected. It may take several seconds after this method returns
// success for all the brokers to become aware that the partitions have been created.
// During this time, ClusterAdmin#describeTopics may not return information about the
// new partitions. This operation is supported by brokers with version 1.0.0 or higher.
CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
// Alter the replica assignment for partitions.
// This operation is supported by brokers with version 2.4.0.0 or higher.
AlterPartitionReassignments(topic string, assignment [][]int32) error
// Provides info on ongoing partitions replica reassignments.
// This operation is supported by brokers with version 2.4.0.0 or higher.
ListPartitionReassignments(topics string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error)
// Delete records whose offset is smaller than the given offset of the corresponding partition.
// This operation is supported by brokers with version 0.11.0.0 or higher.
DeleteRecords(topic string, partitionOffsets map[int32]int64) error
// Get the configuration for the specified resources.
// The returned configuration includes default values, and entries where Default
// is true can be used to distinguish them from user-supplied values.
// Config entries where ReadOnly is true cannot be updated.
// The value of config entries where Sensitive is true is always nil so
// sensitive information is not disclosed.
// This operation is supported by brokers with version 0.11.0.0 or higher.
DescribeConfig(resource ConfigResource) ([]ConfigEntry, error)
// Update the configuration for the specified resources with the default options.
// This operation is supported by brokers with version 0.11.0.0 or higher.
// The resources with their configs (topic is the only resource type with configs
// that can be updated currently). Updates are not transactional, so they may succeed
// for some resources while failing for others. The configs for a particular resource are updated atomically.
AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error
// Creates access control lists (ACLs) which are bound to specific resources.
// This operation is not transactional so it may succeed for some ACLs while fail for others.
// If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
// no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
CreateACL(resource Resource, acl Acl) error
// Lists access control lists (ACLs) according to the supplied filter.
// It may take some time for changes made by createAcls or deleteAcls to be reflected in the output of ListAcls.
// This operation is supported by brokers with version 0.11.0.0 or higher.
ListAcls(filter AclFilter) ([]ResourceAcls, error)
// Deletes access control lists (ACLs) according to the supplied filters.
// This operation is not transactional so it may succeed for some ACLs while fail for others.
// This operation is supported by brokers with version 0.11.0.0 or higher.
DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error)
// List the consumer groups available in the cluster.
ListConsumerGroups() (map[string]string, error)
// Describe the given consumer groups.
DescribeConsumerGroups(groups []string) ([]*GroupDescription, error)
// List the consumer group offsets available in the cluster.
ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error)
// Delete a consumer group.
DeleteConsumerGroup(group string) error
// Get information about the nodes in the cluster
DescribeCluster() (brokers []*Broker, controllerID int32, err error)
// Get information about all log directories on the given set of brokers
DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error)
// Close shuts down the admin and closes underlying client.
Close() error
}
type clusterAdmin struct {
client Client
conf *Config
}
// NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration.
func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) {
client, err := NewClient(addrs, conf)
if err != nil {
return nil, err
}
return NewClusterAdminFromClient(client)
}
// NewClusterAdminFromClient creates a new ClusterAdmin using the given client.
// Note that underlying client will also be closed on admin's Close() call.
func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) {
//make sure we can retrieve the controller
_, err := client.Controller()
if err != nil {
return nil, err
}
ca := &clusterAdmin{
client: client,
conf: client.Config(),
}
return ca, nil
}
func (ca *clusterAdmin) Close() error {
return ca.client.Close()
}
func (ca *clusterAdmin) Controller() (*Broker, error) {
return ca.client.Controller()
}
func (ca *clusterAdmin) refreshController() (*Broker, error) {
return ca.client.RefreshController()
}
// isErrNoController returns `true` if the given error type unwraps to an
// `ErrNotController` response from Kafka
func isErrNoController(err error) bool {
switch e := err.(type) {
case *TopicError:
return e.Err == ErrNotController
case *TopicPartitionError:
return e.Err == ErrNotController
case KError:
return e == ErrNotController
}
return false
}
// retryOnError will repeatedly call the given (error-returning) func in the
// case that its response is non-nil and retriable (as determined by the
// provided retriable func) up to the maximum number of tries permitted by
// the admin client configuration
func (ca *clusterAdmin) retryOnError(retriable func(error) bool, fn func() error) error {
var err error
for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ {
err = fn()
if err == nil || !retriable(err) {
return err
}
Logger.Printf(
"admin/request retrying after %dms... (%d attempts remaining)\n",
ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt)
time.Sleep(ca.conf.Admin.Retry.Backoff)
continue
}
return err
}
func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
if topic == "" {
return ErrInvalidTopic
}
if detail == nil {
return errors.New("you must specify topic details")
}
topicDetails := make(map[string]*TopicDetail)
topicDetails[topic] = detail
request := &CreateTopicsRequest{
TopicDetails: topicDetails,
ValidateOnly: validateOnly,
Timeout: ca.conf.Admin.Timeout,
}
if ca.conf.Version.IsAtLeast(V0_11_0_0) {
request.Version = 1
}
if ca.conf.Version.IsAtLeast(V1_0_0_0) {
request.Version = 2
}
return ca.retryOnError(isErrNoController, func() error {
b, err := ca.Controller()
if err != nil {
return err
}
rsp, err := b.CreateTopics(request)
if err != nil {
return err
}
topicErr, ok := rsp.TopicErrors[topic]
if !ok {
return ErrIncompleteResponse
}
if topicErr.Err != ErrNoError {
if topicErr.Err == ErrNotController {
_, _ = ca.refreshController()
}
return topicErr
}
return nil
})
}
func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) {
controller, err := ca.Controller()
if err != nil {
return nil, err
}
request := &MetadataRequest{
Topics: topics,
AllowAutoTopicCreation: false,
}
if ca.conf.Version.IsAtLeast(V1_0_0_0) {
request.Version = 5
} else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
request.Version = 4
}
response, err := controller.GetMetadata(request)
if err != nil {
return nil, err
}
return response.Topics, nil
}
func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) {
controller, err := ca.Controller()
if err != nil {
return nil, int32(0), err
}
request := &MetadataRequest{
Topics: []string{},
}
if ca.conf.Version.IsAtLeast(V0_10_0_0) {
request.Version = 1
}
response, err := controller.GetMetadata(request)
if err != nil {
return nil, int32(0), err
}
return response.Brokers, response.ControllerID, nil
}
func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) {
brokers := ca.client.Brokers()
for _, b := range brokers {
if b.ID() == id {
return b, nil
}
}
return nil, fmt.Errorf("could not find broker id %d", id)
}
func (ca *clusterAdmin) findAnyBroker() (*Broker, error) {
brokers := ca.client.Brokers()
if len(brokers) > 0 {
index := rand.Intn(len(brokers))
return brokers[index], nil
}
return nil, errors.New("no available broker")
}
func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) {
// In order to build TopicDetails we need to first get the list of all
// topics using a MetadataRequest and then get their configs using a
// DescribeConfigsRequest request. To avoid sending many requests to the
// broker, we use a single DescribeConfigsRequest.
// Send the all-topic MetadataRequest
b, err := ca.findAnyBroker()
if err != nil {
return nil, err
}
_ = b.Open(ca.client.Config())
metadataReq := &MetadataRequest{}
metadataResp, err := b.GetMetadata(metadataReq)
if err != nil {
return nil, err
}
topicsDetailsMap := make(map[string]TopicDetail)
var describeConfigsResources []*ConfigResource
for _, topic := range metadataResp.Topics {
topicDetails := TopicDetail{
NumPartitions: int32(len(topic.Partitions)),
}
if len(topic.Partitions) > 0 {
topicDetails.ReplicaAssignment = map[int32][]int32{}
for _, partition := range topic.Partitions {
topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas
}
topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas))
}
topicsDetailsMap[topic.Name] = topicDetails
// we populate the resources we want to describe from the MetadataResponse
topicResource := ConfigResource{
Type: TopicResource,
Name: topic.Name,
}
describeConfigsResources = append(describeConfigsResources, &topicResource)
}
// Send the DescribeConfigsRequest
describeConfigsReq := &DescribeConfigsRequest{
Resources: describeConfigsResources,
}
if ca.conf.Version.IsAtLeast(V1_1_0_0) {
describeConfigsReq.Version = 1
}
if ca.conf.Version.IsAtLeast(V2_0_0_0) {
describeConfigsReq.Version = 2
}
describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq)
if err != nil {
return nil, err
}
for _, resource := range describeConfigsResp.Resources {
topicDetails := topicsDetailsMap[resource.Name]
topicDetails.ConfigEntries = make(map[string]*string)
for _, entry := range resource.Configs {
// only include non-default non-sensitive config
// (don't actually think topic config will ever be sensitive)
if entry.Default || entry.Sensitive {
continue
}
topicDetails.ConfigEntries[entry.Name] = &entry.Value
}
topicsDetailsMap[resource.Name] = topicDetails
}
return topicsDetailsMap, nil
}
func (ca *clusterAdmin) DeleteTopic(topic string) error {
if topic == "" {
return ErrInvalidTopic
}
request := &DeleteTopicsRequest{
Topics: []string{topic},
Timeout: ca.conf.Admin.Timeout,
}
if ca.conf.Version.IsAtLeast(V0_11_0_0) {
request.Version = 1
}
return ca.retryOnError(isErrNoController, func() error {
b, err := ca.Controller()
if err != nil {
return err
}
rsp, err := b.DeleteTopics(request)
if err != nil {
return err
}
topicErr, ok := rsp.TopicErrorCodes[topic]
if !ok {
return ErrIncompleteResponse
}
if topicErr != ErrNoError {
if topicErr == ErrNotController {
_, _ = ca.refreshController()
}
return topicErr
}
return nil
})
}
func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
if topic == "" {
return ErrInvalidTopic
}
topicPartitions := make(map[string]*TopicPartition)
topicPartitions[topic] = &TopicPartition{Count: count, Assignment: assignment}
request := &CreatePartitionsRequest{
TopicPartitions: topicPartitions,
Timeout: ca.conf.Admin.Timeout,
}
return ca.retryOnError(isErrNoController, func() error {
b, err := ca.Controller()
if err != nil {
return err
}
rsp, err := b.CreatePartitions(request)
if err != nil {
return err
}
topicErr, ok := rsp.TopicPartitionErrors[topic]
if !ok {
return ErrIncompleteResponse
}
if topicErr.Err != ErrNoError {
if topicErr.Err == ErrNotController {
_, _ = ca.refreshController()
}
return topicErr
}
return nil
})
}
func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error {
if topic == "" {
return ErrInvalidTopic
}
request := &AlterPartitionReassignmentsRequest{
TimeoutMs: int32(60000),
Version: int16(0),
}
for i := 0; i < len(assignment); i++ {
request.AddBlock(topic, int32(i), assignment[i])
}
return ca.retryOnError(isErrNoController, func() error {
b, err := ca.Controller()
if err != nil {
return err
}
errs := make([]error, 0)
rsp, err := b.AlterPartitionReassignments(request)
if err != nil {
errs = append(errs, err)
} else {
if rsp.ErrorCode > 0 {
errs = append(errs, errors.New(rsp.ErrorCode.Error()))
}
for topic, topicErrors := range rsp.Errors {
for partition, partitionError := range topicErrors {
if partitionError.errorCode != ErrNoError {
errStr := fmt.Sprintf("[%s-%d]: %s", topic, partition, partitionError.errorCode.Error())
errs = append(errs, errors.New(errStr))
}
}
}
}
if len(errs) > 0 {
return ErrReassignPartitions{MultiError{&errs}}
}
return nil
})
}
func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) {
if topic == "" {
return nil, ErrInvalidTopic
}
request := &ListPartitionReassignmentsRequest{
TimeoutMs: int32(60000),
Version: int16(0),
}
request.AddBlock(topic, partitions)
b, err := ca.Controller()
if err != nil {
return nil, err
}
_ = b.Open(ca.client.Config())
rsp, err := b.ListPartitionReassignments(request)
if err == nil && rsp != nil {
return rsp.TopicStatus, nil
}
return nil, err
}
func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
if topic == "" {
return ErrInvalidTopic
}
partitionPerBroker := make(map[*Broker][]int32)
for partition := range partitionOffsets {
broker, err := ca.client.Leader(topic, partition)
if err != nil {
return err
}
// append works on the nil slice returned for a missing key
partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
}
errs := make([]error, 0)
for broker, partitions := range partitionPerBroker {
topics := make(map[string]*DeleteRecordsRequestTopic)
recordsToDelete := make(map[int32]int64)
for _, p := range partitions {
recordsToDelete[p] = partitionOffsets[p]
}
topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: recordsToDelete}
request := &DeleteRecordsRequest{
Topics: topics,
Timeout: ca.conf.Admin.Timeout,
}
rsp, err := broker.DeleteRecords(request)
if err != nil {
errs = append(errs, err)
} else {
deleteRecordsResponseTopic, ok := rsp.Topics[topic]
if !ok {
errs = append(errs, ErrIncompleteResponse)
} else {
for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions {
if deleteRecordsResponsePartition.Err != ErrNoError {
errs = append(errs, errors.New(deleteRecordsResponsePartition.Err.Error()))
}
}
}
}
}
if len(errs) > 0 {
return ErrDeleteRecords{MultiError{&errs}}
}
//todo since we are dealing with couple of partitions it would be good if we return slice of errors
//for each partition instead of one error
return nil
}
// Returns a bool indicating whether the resource request needs to go to a
// specific broker
func dependsOnSpecificNode(resource ConfigResource) bool {
return (resource.Type == BrokerResource && resource.Name != "") ||
resource.Type == BrokerLoggerResource
}
func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
var entries []ConfigEntry
var resources []*ConfigResource
resources = append(resources, &resource)
request := &DescribeConfigsRequest{
Resources: resources,
}
if ca.conf.Version.IsAtLeast(V1_1_0_0) {
request.Version = 1
}
if ca.conf.Version.IsAtLeast(V2_0_0_0) {
request.Version = 2
}
var (
b *Broker
err error
)
// DescribeConfig of broker/broker logger must be sent to the broker in question
if dependsOnSpecificNode(resource) {
id, _ := strconv.Atoi(resource.Name)
b, err = ca.findBroker(int32(id))
} else {
b, err = ca.findAnyBroker()
}
if err != nil {
return nil, err
}
_ = b.Open(ca.client.Config())
rsp, err := b.DescribeConfigs(request)
if err != nil {
return nil, err
}
for _, rspResource := range rsp.Resources {
if rspResource.Name == resource.Name {
if rspResource.ErrorMsg != "" {
return nil, errors.New(rspResource.ErrorMsg)
}
if rspResource.ErrorCode != 0 {
return nil, KError(rspResource.ErrorCode)
}
for _, cfgEntry := range rspResource.Configs {
entries = append(entries, *cfgEntry)
}
}
}
return entries, nil
}
func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
var resources []*AlterConfigsResource
resources = append(resources, &AlterConfigsResource{
Type: resourceType,
Name: name,
ConfigEntries: entries,
})
request := &AlterConfigsRequest{
Resources: resources,
ValidateOnly: validateOnly,
}
var (
b *Broker
err error
)
// AlterConfig of broker/broker logger must be sent to the broker in question
if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) {
id, _ := strconv.Atoi(name)
b, err = ca.findBroker(int32(id))
} else {
b, err = ca.findAnyBroker()
}
if err != nil {
return err
}
_ = b.Open(ca.client.Config())
rsp, err := b.AlterConfigs(request)
if err != nil {
return err
}
for _, rspResource := range rsp.Resources {
if rspResource.Name == name {
if rspResource.ErrorMsg != "" {
return errors.New(rspResource.ErrorMsg)
}
if rspResource.ErrorCode != 0 {
return KError(rspResource.ErrorCode)
}
}
}
return nil
}
func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
var acls []*AclCreation
acls = append(acls, &AclCreation{resource, acl})
request := &CreateAclsRequest{AclCreations: acls}
if ca.conf.Version.IsAtLeast(V2_0_0_0) {
request.Version = 1
}
b, err := ca.Controller()
if err != nil {
return err
}
_, err = b.CreateAcls(request)
return err
}
func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
request := &DescribeAclsRequest{AclFilter: filter}
if ca.conf.Version.IsAtLeast(V2_0_0_0) {
request.Version = 1
}
b, err := ca.Controller()
if err != nil {
return nil, err
}
rsp, err := b.DescribeAcls(request)
if err != nil {
return nil, err
}
var lAcls []ResourceAcls
for _, rAcl := range rsp.ResourceAcls {
lAcls = append(lAcls, *rAcl)
}
return lAcls, nil
}
func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) {
var filters []*AclFilter
filters = append(filters, &filter)
request := &DeleteAclsRequest{Filters: filters}
if ca.conf.Version.IsAtLeast(V2_0_0_0) {
request.Version = 1
}
b, err := ca.Controller()
if err != nil {
return nil, err
}
rsp, err := b.DeleteAcls(request)
if err != nil {
return nil, err
}
var mAcls []MatchingAcl
for _, fr := range rsp.FilterResponses {
for _, mACL := range fr.MatchingAcls {
mAcls = append(mAcls, *mACL)
}
}
return mAcls, nil
}
func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) {
groupsPerBroker := make(map[*Broker][]string)
for _, group := range groups {
controller, err := ca.client.Coordinator(group)
if err != nil {
return nil, err
}
groupsPerBroker[controller] = append(groupsPerBroker[controller], group)
}
for broker, brokerGroups := range groupsPerBroker {
response, err := broker.DescribeGroups(&DescribeGroupsRequest{
Groups: brokerGroups,
})
if err != nil {
return nil, err
}
result = append(result, response.Groups...)
}
return result, nil
}
func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) {
allGroups = make(map[string]string)
// Query brokers in parallel, since we have to query *all* brokers
brokers := ca.client.Brokers()
groupMaps := make(chan map[string]string, len(brokers))
errors := make(chan error, len(brokers))
wg := sync.WaitGroup{}
for _, b := range brokers {
wg.Add(1)
go func(b *Broker, conf *Config) {
defer wg.Done()
_ = b.Open(conf) // Ensure that broker is opened
response, err := b.ListGroups(&ListGroupsRequest{})
if err != nil {
errors <- err
return
}
groups := make(map[string]string)
for group, typ := range response.Groups {
groups[group] = typ
}
groupMaps <- groups
}(b, ca.conf)
}
wg.Wait()
close(groupMaps)
close(errors)
for groupMap := range groupMaps {
for group, protocolType := range groupMap {
allGroups[group] = protocolType
}
}
// Intentionally return only the first error for simplicity
err = <-errors
return
}
func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) {
coordinator, err := ca.client.Coordinator(group)
if err != nil {
return nil, err
}
request := &OffsetFetchRequest{
ConsumerGroup: group,
partitions: topicPartitions,
}
if ca.conf.Version.IsAtLeast(V0_10_2_0) {
request.Version = 2
} else if ca.conf.Version.IsAtLeast(V0_8_2_2) {
request.Version = 1
}
return coordinator.FetchOffset(request)
}
func (ca *clusterAdmin) DeleteConsumerGroup(group string) error {
coordinator, err := ca.client.Coordinator(group)
if err != nil {
return err
}
request := &DeleteGroupsRequest{
Groups: []string{group},
}
resp, err := coordinator.DeleteGroups(request)
if err != nil {
return err
}
groupErr, ok := resp.GroupErrorCodes[group]
if !ok {
return ErrIncompleteResponse
}
if groupErr != ErrNoError {
return groupErr
}
return nil
}
func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) {
allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata)
// Query brokers in parallel, since we may have to query multiple brokers
logDirsMaps := make(chan map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds))
errors := make(chan error, len(brokerIds))
wg := sync.WaitGroup{}
for _, b := range brokerIds {
broker, err := ca.findBroker(b)
if err != nil {
Logger.Printf("Unable to find broker with ID = %v\n", b)
continue
}
// only count the goroutine once we know it will run, otherwise the
// continue above would leave wg.Wait() blocked forever
wg.Add(1)
go func(b *Broker, conf *Config) {
defer wg.Done()
_ = b.Open(conf) // Ensure that broker is opened
response, err := b.DescribeLogDirs(&DescribeLogDirsRequest{})
if err != nil {
errors <- err
return
}
logDirs := make(map[int32][]DescribeLogDirsResponseDirMetadata)
logDirs[b.ID()] = response.LogDirs
logDirsMaps <- logDirs
}(broker, ca.conf)
}
wg.Wait()
close(logDirsMaps)
close(errors)
for logDirsMap := range logDirsMaps {
for id, logDirs := range logDirsMap {
allLogDirs[id] = logDirs
}
}
// Intentionally return only the first error for simplicity
err = <-errors
return
}
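
Usage note: putting admin.go together, typical usage goes through NewClusterAdmin. A minimal sketch, with placeholder broker address and topic name:

package main

import (
    "log"

    "github.com/Shopify/sarama"
)

func main() {
    conf := sarama.NewConfig()
    conf.Version = sarama.V1_0_0_0 // pick the version your brokers actually run

    admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, conf)
    if err != nil {
        log.Fatal(err)
    }
    defer admin.Close() // also closes the underlying client

    // Create a topic with 3 partitions and replication factor 1.
    err = admin.CreateTopic("demo-topic", &sarama.TopicDetail{
        NumPartitions:     3,
        ReplicationFactor: 1,
    }, false)
    if err != nil {
        log.Fatal(err)
    }

    topics, err := admin.ListTopics()
    if err != nil {
        log.Fatal(err)
    }
    for name, detail := range topics {
        log.Printf("%s: %d partitions", name, detail.NumPartitions)
    }
}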

@@ -0,0 +1,126 @@
package sarama
//AlterConfigsRequest is an alter config request type
type AlterConfigsRequest struct {
Resources []*AlterConfigsResource
ValidateOnly bool
}
//AlterConfigsResource is an alter config resource type
type AlterConfigsResource struct {
Type ConfigResourceType
Name string
ConfigEntries map[string]*string
}
func (a *AlterConfigsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(a.Resources)); err != nil {
return err
}
for _, r := range a.Resources {
if err := r.encode(pe); err != nil {
return err
}
}
pe.putBool(a.ValidateOnly)
return nil
}
func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
resourceCount, err := pd.getArrayLength()
if err != nil {
return err
}
a.Resources = make([]*AlterConfigsResource, resourceCount)
for i := range a.Resources {
r := &AlterConfigsResource{}
err = r.decode(pd, version)
if err != nil {
return err
}
a.Resources[i] = r
}
validateOnly, err := pd.getBool()
if err != nil {
return err
}
a.ValidateOnly = validateOnly
return nil
}
func (a *AlterConfigsResource) encode(pe packetEncoder) error {
pe.putInt8(int8(a.Type))
if err := pe.putString(a.Name); err != nil {
return err
}
if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
return err
}
for configKey, configValue := range a.ConfigEntries {
if err := pe.putString(configKey); err != nil {
return err
}
if err := pe.putNullableString(configValue); err != nil {
return err
}
}
return nil
}
func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
t, err := pd.getInt8()
if err != nil {
return err
}
a.Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
a.Name = name
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
a.ConfigEntries = make(map[string]*string, n)
for i := 0; i < n; i++ {
configKey, err := pd.getString()
if err != nil {
return err
}
if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
return err
}
}
}
return nil
}
func (a *AlterConfigsRequest) key() int16 {
return 33
}
func (a *AlterConfigsRequest) version() int16 {
return 0
}
func (a *AlterConfigsRequest) headerVersion() int16 {
return 1
}
func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}
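
Usage note: callers normally reach AlterConfigsRequest through ClusterAdmin.AlterConfig rather than building it by hand. A minimal sketch that sets retention.ms on a placeholder topic (address, topic and value are illustrative):

package main

import (
    "log"

    "github.com/Shopify/sarama"
)

func main() {
    conf := sarama.NewConfig()
    conf.Version = sarama.V0_11_0_0 // AlterConfigs needs brokers on 0.11.0.0 or newer

    admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, conf)
    if err != nil {
        log.Fatal(err)
    }
    defer admin.Close()

    // Set retention.ms to one day; passing validateOnly=true would only
    // dry-run the change on the broker without applying it.
    retention := "86400000"
    entries := map[string]*string{"retention.ms": &retention}
    if err := admin.AlterConfig(sarama.TopicResource, "demo-topic", entries, false); err != nil {
        log.Fatal(err)
    }
}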

@@ -0,0 +1,101 @@
package sarama
import "time"
//AlterConfigsResponse is a response type for alter config
type AlterConfigsResponse struct {
ThrottleTime time.Duration
Resources []*AlterConfigsResourceResponse
}
//AlterConfigsResourceResponse is a response type for alter config resource
type AlterConfigsResourceResponse struct {
ErrorCode int16
ErrorMsg string
Type ConfigResourceType
Name string
}
func (a *AlterConfigsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(a.Resources)); err != nil {
return err
}
for i := range a.Resources {
pe.putInt16(a.Resources[i].ErrorCode)
err := pe.putString(a.Resources[i].ErrorMsg)
if err != nil {
return err
}
pe.putInt8(int8(a.Resources[i].Type))
err = pe.putString(a.Resources[i].Name)
if err != nil {
return err
}
}
return nil
}
func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
responseCount, err := pd.getArrayLength()
if err != nil {
return err
}
a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
for i := range a.Resources {
a.Resources[i] = new(AlterConfigsResourceResponse)
errCode, err := pd.getInt16()
if err != nil {
return err
}
a.Resources[i].ErrorCode = errCode
e, err := pd.getString()
if err != nil {
return err
}
a.Resources[i].ErrorMsg = e
t, err := pd.getInt8()
if err != nil {
return err
}
a.Resources[i].Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
a.Resources[i].Name = name
}
return nil
}
func (a *AlterConfigsResponse) key() int16 {
return 32
}
func (a *AlterConfigsResponse) version() int16 {
return 0
}
func (a *AlterConfigsResponse) headerVersion() int16 {
return 0
}
func (a *AlterConfigsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

@@ -0,0 +1,130 @@
package sarama
type alterPartitionReassignmentsBlock struct {
replicas []int32
}
func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error {
if err := pe.putNullableCompactInt32Array(b.replicas); err != nil {
return err
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) {
if b.replicas, err = pd.getCompactInt32Array(); err != nil {
return err
}
return nil
}
type AlterPartitionReassignmentsRequest struct {
TimeoutMs int32
blocks map[string]map[int32]*alterPartitionReassignmentsBlock
Version int16
}
func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error {
pe.putInt32(r.TimeoutMs)
pe.putCompactArrayLength(len(r.blocks))
for topic, partitions := range r.blocks {
if err := pe.putCompactString(topic); err != nil {
return err
}
pe.putCompactArrayLength(len(partitions))
for partition, block := range partitions {
pe.putInt32(partition)
if err := block.encode(pe); err != nil {
return err
}
}
pe.putEmptyTaggedFieldArray()
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.TimeoutMs, err = pd.getInt32(); err != nil {
return err
}
topicCount, err := pd.getCompactArrayLength()
if err != nil {
return err
}
if topicCount > 0 {
r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
for i := 0; i < topicCount; i++ {
topic, err := pd.getCompactString()
if err != nil {
return err
}
partitionCount, err := pd.getCompactArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
block := &alterPartitionReassignmentsBlock{}
if err := block.decode(pd); err != nil {
return err
}
r.blocks[topic][partition] = block
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
}
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
}
}
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
return
}
func (r *AlterPartitionReassignmentsRequest) key() int16 {
return 45
}
func (r *AlterPartitionReassignmentsRequest) version() int16 {
return r.Version
}
func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 {
return 2
}
func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
return V2_4_0_0
}
func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) {
if r.blocks == nil {
r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
}
if r.blocks[topic] == nil {
r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
}
r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas}
}
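
Usage note: via ClusterAdmin.AlterPartitionReassignments, element i of the assignment slice becomes the desired replica list for partition i (see AddBlock above). A minimal sketch with placeholder broker IDs:

package main

import (
    "log"

    "github.com/Shopify/sarama"
)

func main() {
    conf := sarama.NewConfig()
    conf.Version = sarama.V2_4_0_0 // AlterPartitionReassignments needs brokers on 2.4.0 or newer

    admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, conf)
    if err != nil {
        log.Fatal(err)
    }
    defer admin.Close()

    // Move partition 0 of "demo-topic" to brokers 1,2,3 and partition 1 to 2,3,1.
    assignment := [][]int32{
        {1, 2, 3}, // partition 0
        {2, 3, 1}, // partition 1
    }
    if err := admin.AlterPartitionReassignments("demo-topic", assignment); err != nil {
        log.Fatal(err)
    }
}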

@@ -0,0 +1,157 @@
package sarama
type alterPartitionReassignmentsErrorBlock struct {
errorCode KError
errorMessage *string
}
func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error {
pe.putInt16(int16(b.errorCode))
if err := pe.putNullableCompactString(b.errorMessage); err != nil {
return err
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) {
errorCode, err := pd.getInt16()
if err != nil {
return err
}
b.errorCode = KError(errorCode)
if b.errorMessage, err = pd.getCompactNullableString(); err != nil {
return err
}
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
return nil
}
type AlterPartitionReassignmentsResponse struct {
Version int16
ThrottleTimeMs int32
ErrorCode KError
ErrorMessage *string
Errors map[string]map[int32]*alterPartitionReassignmentsErrorBlock
}
func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) {
if r.Errors == nil {
r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock)
}
partitions := r.Errors[topic]
if partitions == nil {
partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock)
r.Errors[topic] = partitions
}
partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message}
}
func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error {
pe.putInt32(r.ThrottleTimeMs)
pe.putInt16(int16(r.ErrorCode))
if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
return err
}
pe.putCompactArrayLength(len(r.Errors))
for topic, partitions := range r.Errors {
if err := pe.putCompactString(topic); err != nil {
return err
}
pe.putCompactArrayLength(len(partitions))
for partition, block := range partitions {
pe.putInt32(partition)
if err := block.encode(pe); err != nil {
return err
}
}
pe.putEmptyTaggedFieldArray()
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
return err
}
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.ErrorCode = KError(kerr)
if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
return err
}
numTopics, err := pd.getCompactArrayLength()
if err != nil {
return err
}
if numTopics > 0 {
r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics)
for i := 0; i < numTopics; i++ {
topic, err := pd.getCompactString()
if err != nil {
return err
}
ongoingPartitionReassignments, err := pd.getCompactArrayLength()
if err != nil {
return err
}
r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments)
for j := 0; j < ongoingPartitionReassignments; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
block := &alterPartitionReassignmentsErrorBlock{}
if err := block.decode(pd); err != nil {
return err
}
r.Errors[topic][partition] = block
}
if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
}
}
if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
return nil
}
func (r *AlterPartitionReassignmentsResponse) key() int16 {
return 45
}
func (r *AlterPartitionReassignmentsResponse) version() int16 {
return r.Version
}
func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 {
return 1
}
func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
return V2_4_0_0
}
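
Usage note: the companion ListPartitionReassignments reports in-flight reassignments as a topic -> partition -> status map. A hedged sketch, assuming the status type exposes Replicas, AddingReplicas and RemovingReplicas fields as elsewhere in this vendored snapshot; names are placeholders:

package main

import (
    "log"

    "github.com/Shopify/sarama"
)

func main() {
    conf := sarama.NewConfig()
    conf.Version = sarama.V2_4_0_0

    admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, conf)
    if err != nil {
        log.Fatal(err)
    }
    defer admin.Close()

    status, err := admin.ListPartitionReassignments("demo-topic", []int32{0, 1})
    if err != nil {
        log.Fatal(err)
    }
    for topic, partitions := range status {
        for partition, s := range partitions {
            log.Printf("%s/%d: replicas=%v adding=%v removing=%v",
                topic, partition, s.Replicas, s.AddingReplicas, s.RemovingReplicas)
        }
    }
}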

@@ -0,0 +1,29 @@
package sarama
//ApiVersionsRequest ...
type ApiVersionsRequest struct {
}
func (a *ApiVersionsRequest) encode(pe packetEncoder) error {
return nil
}
func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
return nil
}
func (a *ApiVersionsRequest) key() int16 {
return 18
}
func (a *ApiVersionsRequest) version() int16 {
return 0
}
func (a *ApiVersionsRequest) headerVersion() int16 {
return 1
}
func (a *ApiVersionsRequest) requiredVersion() KafkaVersion {
return V0_10_0_0
}

@@ -0,0 +1,93 @@
package sarama
//ApiVersionsResponseBlock is an api version response block type
type ApiVersionsResponseBlock struct {
ApiKey int16
MinVersion int16
MaxVersion int16
}
func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
pe.putInt16(b.ApiKey)
pe.putInt16(b.MinVersion)
pe.putInt16(b.MaxVersion)
return nil
}
func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
var err error
if b.ApiKey, err = pd.getInt16(); err != nil {
return err
}
if b.MinVersion, err = pd.getInt16(); err != nil {
return err
}
if b.MaxVersion, err = pd.getInt16(); err != nil {
return err
}
return nil
}
//ApiVersionsResponse is an api version response type
type ApiVersionsResponse struct {
Err KError
ApiVersions []*ApiVersionsResponseBlock
}
func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
return err
}
for _, apiVersion := range r.ApiVersions {
if err := apiVersion.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
for i := 0; i < numBlocks; i++ {
block := new(ApiVersionsResponseBlock)
if err := block.decode(pd); err != nil {
return err
}
r.ApiVersions[i] = block
}
return nil
}
func (r *ApiVersionsResponse) key() int16 {
return 18
}
func (r *ApiVersionsResponse) version() int16 {
return 0
}
func (r *ApiVersionsResponse) headerVersion() int16 {
return 0
}
func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
return V0_10_0_0
}
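
Usage note: ApiVersions is how a client learns which request versions a broker accepts; sarama exposes it on Broker. A minimal sketch against a single placeholder broker:

package main

import (
    "log"

    "github.com/Shopify/sarama"
)

func main() {
    broker := sarama.NewBroker("localhost:9092")
    conf := sarama.NewConfig()
    conf.Version = sarama.V0_10_0_0 // ApiVersions exists from 0.10.0.0 on

    if err := broker.Open(conf); err != nil {
        log.Fatal(err)
    }
    defer broker.Close()

    resp, err := broker.ApiVersions(&sarama.ApiVersionsRequest{})
    if err != nil {
        log.Fatal(err)
    }
    for _, v := range resp.ApiVersions {
        log.Printf("api key %d: versions %d..%d", v.ApiKey, v.MinVersion, v.MaxVersion)
    }
}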

vendor/github.com/Shopify/sarama/async_producer.go generated vendored Normal file (1154 lines)

File diff suppressed because it is too large

vendor/github.com/Shopify/sarama/balance_strategy.go generated vendored Normal file (1061 lines)

File diff suppressed because it is too large

vendor/github.com/Shopify/sarama/broker.go generated vendored Normal file (1441 lines)

File diff suppressed because it is too large

vendor/github.com/Shopify/sarama/client.go generated vendored Normal file (1070 lines)

File diff suppressed because it is too large

vendor/github.com/Shopify/sarama/compress.go generated vendored Normal file
@@ -0,0 +1,194 @@
package sarama
import (
"bytes"
"compress/gzip"
"fmt"
"sync"
snappy "github.com/eapache/go-xerial-snappy"
"github.com/pierrec/lz4"
)
var (
lz4WriterPool = sync.Pool{
New: func() interface{} {
return lz4.NewWriter(nil)
},
}
gzipWriterPool = sync.Pool{
New: func() interface{} {
return gzip.NewWriter(nil)
},
}
gzipWriterPoolForCompressionLevel1 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 1)
if err != nil {
panic(err)
}
return gz
},
}
gzipWriterPoolForCompressionLevel2 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 2)
if err != nil {
panic(err)
}
return gz
},
}
gzipWriterPoolForCompressionLevel3 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 3)
if err != nil {
panic(err)
}
return gz
},
}
gzipWriterPoolForCompressionLevel4 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 4)
if err != nil {
panic(err)
}
return gz
},
}
gzipWriterPoolForCompressionLevel5 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 5)
if err != nil {
panic(err)
}
return gz
},
}
gzipWriterPoolForCompressionLevel6 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 6)
if err != nil {
panic(err)
}
return gz
},
}
gzipWriterPoolForCompressionLevel7 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 7)
if err != nil {
panic(err)
}
return gz
},
}
gzipWriterPoolForCompressionLevel8 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 8)
if err != nil {
panic(err)
}
return gz
},
}
gzipWriterPoolForCompressionLevel9 = sync.Pool{
New: func() interface{} {
gz, err := gzip.NewWriterLevel(nil, 9)
if err != nil {
panic(err)
}
return gz
},
}
)
func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) {
switch cc {
case CompressionNone:
return data, nil
case CompressionGZIP:
var (
err error
buf bytes.Buffer
writer *gzip.Writer
)
switch level {
case CompressionLevelDefault:
writer = gzipWriterPool.Get().(*gzip.Writer)
defer gzipWriterPool.Put(writer)
writer.Reset(&buf)
case 1:
writer = gzipWriterPoolForCompressionLevel1.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel1.Put(writer)
writer.Reset(&buf)
case 2:
writer = gzipWriterPoolForCompressionLevel2.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel2.Put(writer)
writer.Reset(&buf)
case 3:
writer = gzipWriterPoolForCompressionLevel3.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel3.Put(writer)
writer.Reset(&buf)
case 4:
writer = gzipWriterPoolForCompressionLevel4.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel4.Put(writer)
writer.Reset(&buf)
case 5:
writer = gzipWriterPoolForCompressionLevel5.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel5.Put(writer)
writer.Reset(&buf)
case 6:
writer = gzipWriterPoolForCompressionLevel6.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel6.Put(writer)
writer.Reset(&buf)
case 7:
writer = gzipWriterPoolForCompressionLevel7.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel7.Put(writer)
writer.Reset(&buf)
case 8:
writer = gzipWriterPoolForCompressionLevel8.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel8.Put(writer)
writer.Reset(&buf)
case 9:
writer = gzipWriterPoolForCompressionLevel9.Get().(*gzip.Writer)
defer gzipWriterPoolForCompressionLevel9.Put(writer)
writer.Reset(&buf)
default:
writer, err = gzip.NewWriterLevel(&buf, level)
if err != nil {
return nil, err
}
}
if _, err := writer.Write(data); err != nil {
return nil, err
}
if err := writer.Close(); err != nil {
return nil, err
}
return buf.Bytes(), nil
case CompressionSnappy:
return snappy.Encode(data), nil
case CompressionLZ4:
writer := lz4WriterPool.Get().(*lz4.Writer)
defer lz4WriterPool.Put(writer)
var buf bytes.Buffer
writer.Reset(&buf)
if _, err := writer.Write(data); err != nil {
return nil, err
}
if err := writer.Close(); err != nil {
return nil, err
}
return buf.Bytes(), nil
case CompressionZSTD:
return zstdCompress(nil, data)
default:
return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)}
}
}
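
Usage note: the per-level gzip writer pools above exist so the hot produce path can reuse compressors instead of constructing one per batch; from the application side they are selected purely by two producer config fields. A minimal sketch (placeholder address and topic):

package main

import (
    "log"

    "github.com/Shopify/sarama"
)

func main() {
    conf := sarama.NewConfig()
    conf.Producer.Compression = sarama.CompressionGZIP
    conf.Producer.CompressionLevel = 6  // served by gzipWriterPoolForCompressionLevel6 above
    conf.Producer.Return.Successes = true // required by SyncProducer

    producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, conf)
    if err != nil {
        log.Fatal(err)
    }
    defer producer.Close()

    msg := &sarama.ProducerMessage{Topic: "demo-topic", Value: sarama.StringEncoder("hello")}
    if _, _, err := producer.SendMessage(msg); err != nil {
        log.Fatal(err)
    }
}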

vendor/github.com/Shopify/sarama/config.go generated vendored Normal file
@@ -0,0 +1,749 @@
package sarama
import (
"compress/gzip"
"crypto/tls"
"fmt"
"io/ioutil"
"net"
"regexp"
"time"
"github.com/rcrowley/go-metrics"
"golang.org/x/net/proxy"
)
const defaultClientID = "sarama"
var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
// Config is used to pass multiple configuration options to Sarama's constructors.
type Config struct {
// Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
Admin struct {
Retry struct {
// The total number of times to retry sending (retriable) admin requests (default 5).
// Similar to the `retries` setting of the JVM AdminClientConfig.
Max int
// Backoff time between retries of a failed request (default 100ms)
Backoff time.Duration
}
// The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
// including topics, brokers, configurations and ACLs (defaults to 3 seconds).
Timeout time.Duration
}
// Net is the namespace for network-level properties used by the Broker, and
// shared by the Client/Producer/Consumer.
Net struct {
// How many outstanding requests a connection is allowed to have before
// sending on it blocks (default 5).
MaxOpenRequests int
// All three of the below configurations are similar to the
// `socket.timeout.ms` setting in JVM kafka. All of them default
// to 30 seconds.
DialTimeout time.Duration // How long to wait for the initial connection.
ReadTimeout time.Duration // How long to wait for a response.
WriteTimeout time.Duration // How long to wait for a transmit.
TLS struct {
// Whether or not to use TLS when connecting to the broker
// (defaults to false).
Enable bool
// The TLS configuration to use for secure connections if
// enabled (defaults to nil).
Config *tls.Config
}
// SASL based authentication with broker. While there are multiple SASL authentication methods
// the current implementation is limited to plaintext (SASL/PLAIN) authentication
SASL struct {
// Whether or not to use SASL authentication when connecting to the broker
// (defaults to false).
Enable bool
// SASLMechanism is the name of the enabled SASL mechanism.
// Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN).
Mechanism SASLMechanism
// Version is the SASL Protocol Version to use
// Kafka > 1.x should use V1, except on Azure EventHub which uses V0
Version int16
// Whether or not to send the Kafka SASL handshake first if enabled
// (defaults to true). You should only set this to false if you're using
// a non-Kafka SASL proxy.
Handshake bool
// AuthIdentity is an (optional) authorization identity (authzid) to
// use for SASL/PLAIN authentication (if different from User) when
// an authenticated user is permitted to act as the presented
// alternative user. See RFC4616 for details.
AuthIdentity string
// User is the authentication identity (authcid) to present for
// SASL/PLAIN or SASL/SCRAM authentication
User string
// Password for SASL/PLAIN authentication
Password string
// authz id used for SASL/SCRAM authentication
SCRAMAuthzID string
// SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM
// client used to perform the SCRAM exchange with the server.
SCRAMClientGeneratorFunc func() SCRAMClient
// TokenProvider is a user-defined callback for generating
// access tokens for SASL/OAUTHBEARER auth. See the
// AccessTokenProvider interface docs for proper implementation
// guidelines.
TokenProvider AccessTokenProvider
GSSAPI GSSAPIConfig
}
// KeepAlive specifies the keep-alive period for an active network connection (defaults to 0).
// If zero or positive, keep-alives are enabled.
// If negative, keep-alives are disabled.
KeepAlive time.Duration
// LocalAddr is the local address to use when dialing an
// address. The address must be of a compatible type for the
// network being dialed.
// If nil, a local address is automatically chosen.
LocalAddr net.Addr
Proxy struct {
// Whether or not to use proxy when connecting to the broker
// (defaults to false).
Enable bool
// The proxy dialer to use when enabled (defaults to nil).
Dialer proxy.Dialer
}
}
// Metadata is the namespace for metadata management properties used by the
// Client, and shared by the Producer/Consumer.
Metadata struct {
Retry struct {
// The total number of times to retry a metadata request when the
// cluster is in the middle of a leader election (default 3).
Max int
// How long to wait for leader election to occur before retrying
// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
Backoff time.Duration
// Called to compute backoff time dynamically. Useful for implementing
// more sophisticated backoff strategies. This takes precedence over
// `Backoff` if set.
BackoffFunc func(retries, maxRetries int) time.Duration
}
// How frequently to refresh the cluster metadata in the background.
// Defaults to 10 minutes. Set to 0 to disable. Similar to
// `topic.metadata.refresh.interval.ms` in the JVM version.
RefreshFrequency time.Duration
// Whether to maintain a full set of metadata for all topics, or just
// the minimal set that has been necessary so far. The full set is simpler
// and usually more convenient, but can take up a substantial amount of
// memory if you have many topics and partitions. Defaults to true.
Full bool
// How long to wait for a successful metadata response.
// Disabled by default which means a metadata request against an unreachable
// cluster (all brokers are unreachable or unresponsive) can take up to
// `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max`
// to fail.
Timeout time.Duration
}
// Producer is the namespace for configuration related to producing messages,
// used by the Producer.
Producer struct {
// The maximum permitted size of a message (defaults to 1000000). Should be
// set equal to or smaller than the broker's `message.max.bytes`.
MaxMessageBytes int
// The level of acknowledgement reliability needed from the broker (defaults
// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
// JVM producer.
RequiredAcks RequiredAcks
// The maximum duration the broker will wait the receipt of the number of
// RequiredAcks (defaults to 10 seconds). This is only relevant when
// RequiredAcks is set to WaitForAll or a number > 1. Only supports
// millisecond resolution, nanoseconds will be truncated. Equivalent to
// the JVM producer's `request.timeout.ms` setting.
Timeout time.Duration
// The type of compression to use on messages (defaults to no compression).
// Similar to `compression.codec` setting of the JVM producer.
Compression CompressionCodec
// The level of compression to use on messages. The meaning depends
// on the actual compression type used and defaults to default compression
// level for the codec.
CompressionLevel int
// Generates partitioners for choosing the partition to send messages to
// (defaults to hashing the message key). Similar to the `partitioner.class`
// setting for the JVM producer.
Partitioner PartitionerConstructor
// If enabled, the producer will ensure that exactly one copy of each message is
// written.
Idempotent bool
// Return specifies what channels will be populated. If they are set to true,
// you must read from the respective channels to prevent deadlock. If,
// however, this config is used to create a `SyncProducer`, both must be set
// to true and you shall not read from the channels since the producer does
// this internally.
Return struct {
// If enabled, successfully delivered messages will be returned on the
// Successes channel (default disabled).
Successes bool
// If enabled, messages that failed to deliver will be returned on the
// Errors channel, including the error that occurred (default enabled).
Errors bool
}
// The following config options control how often messages are batched up and
// sent to the broker. By default, messages are sent as fast as possible, and
// all messages received while the current batch is in-flight are placed
// into the subsequent batch.
Flush struct {
// The best-effort number of bytes needed to trigger a flush. Use the
// global sarama.MaxRequestSize to set a hard upper limit.
Bytes int
// The best-effort number of messages needed to trigger a flush. Use
// `MaxMessages` to set a hard upper limit.
Messages int
// The best-effort frequency of flushes. Equivalent to
// `queue.buffering.max.ms` setting of JVM producer.
Frequency time.Duration
// The maximum number of messages the producer will send in a single
// broker request. Defaults to 0 for unlimited. Similar to
// `queue.buffering.max.messages` in the JVM producer.
MaxMessages int
}
Retry struct {
// The total number of times to retry sending a message (default 3).
// Similar to the `message.send.max.retries` setting of the JVM producer.
Max int
// How long to wait for the cluster to settle between retries
// (default 100ms). Similar to the `retry.backoff.ms` setting of the
// JVM producer.
Backoff time.Duration
// Called to compute backoff time dynamically. Useful for implementing
// more sophisticated backoff strategies. This takes precedence over
// `Backoff` if set.
BackoffFunc func(retries, maxRetries int) time.Duration
}
}
// Consumer is the namespace for configuration related to consuming messages,
// used by the Consumer.
Consumer struct {
// Group is the namespace for configuring consumer group.
Group struct {
Session struct {
// The timeout used to detect consumer failures when using Kafka's group management facility.
// The consumer sends periodic heartbeats to indicate its liveness to the broker.
// If no heartbeats are received by the broker before the expiration of this session timeout,
// then the broker will remove this consumer from the group and initiate a rebalance.
// Note that the value must be in the allowable range as configured in the broker configuration
// by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s)
Timeout time.Duration
}
Heartbeat struct {
// The expected time between heartbeats to the consumer coordinator when using Kafka's group
// management facilities. Heartbeats are used to ensure that the consumer's session stays active and
// to facilitate rebalancing when new consumers join or leave the group.
// The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no
// higher than 1/3 of that value.
// It can be adjusted even lower to control the expected time for normal rebalances (default 3s)
Interval time.Duration
}
Rebalance struct {
// Strategy for allocating topic partitions to members (default BalanceStrategyRange)
Strategy BalanceStrategy
// The maximum allowed time for each worker to join the group once a rebalance has begun.
// This is basically a limit on the amount of time needed for all tasks to flush any pending
// data and commit offsets. If the timeout is exceeded, then the worker will be removed from
// the group, which will cause offset commit failures (default 60s).
Timeout time.Duration
Retry struct {
// When a new consumer joins a consumer group the set of consumers attempt to "rebalance"
// the load to assign partitions to each consumer. If the set of consumers changes while
// this assignment is taking place the rebalance will fail and retry. This setting controls
// the maximum number of attempts before giving up (default 4).
Max int
// Backoff time between retries during rebalance (default 2s)
Backoff time.Duration
}
}
Member struct {
// Custom metadata to include when joining the group. The user data for all joined members
// can be retrieved by sending a DescribeGroupRequest to the broker that is the
// coordinator for the group.
UserData []byte
}
}
Retry struct {
// How long to wait after failing to read from a partition before
// trying again (default 2s).
Backoff time.Duration
// Called to compute backoff time dynamically. Useful for implementing
// more sophisticated backoff strategies. This takes precedence over
// `Backoff` if set.
BackoffFunc func(retries int) time.Duration
}
// Fetch is the namespace for controlling how many bytes are retrieved by any
// given request.
Fetch struct {
// The minimum number of message bytes to fetch in a request - the broker
// will wait until at least this many are available. The default is 1,
// as 0 causes the consumer to spin when no messages are available.
// Equivalent to the JVM's `fetch.min.bytes`.
Min int32
// The default number of message bytes to fetch from the broker in each
// request (default 1MB). This should be larger than the majority of
// your messages, or else the consumer will spend a lot of time
// negotiating sizes and not actually consuming. Similar to the JVM's
// `fetch.message.max.bytes`.
Default int32
// The maximum number of message bytes to fetch from the broker in a
// single request. Messages larger than this will return
// ErrMessageTooLarge and will not be consumable, so you must be sure
// this is at least as large as your largest message. Defaults to 0
// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
// global `sarama.MaxResponseSize` still applies.
Max int32
}
// The maximum amount of time the broker will wait for Consumer.Fetch.Min
// bytes to become available before it returns fewer than that anyway. The
// default is 250ms, since 0 causes the consumer to spin when no events are
// available. 100-500ms is a reasonable range for most cases. Kafka only
// supports precision up to milliseconds; nanoseconds will be truncated.
// Equivalent to the JVM's `fetch.wait.max.ms`.
MaxWaitTime time.Duration
// The maximum amount of time the consumer expects a message to take to
// process for the user. If writing to the Messages channel takes longer
// than this, that partition will stop fetching more messages until it
// can proceed again.
// Note that, since the Messages channel is buffered, the actual grace time is
// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
// If a message is not written to the Messages channel between two ticks
// of the expiryTicker then a timeout is detected.
// Using a ticker instead of a timer to detect timeouts should typically
// result in many fewer calls to Timer functions which may result in a
// significant performance improvement if many messages are being sent
// and timeouts are infrequent.
// The disadvantage of using a ticker instead of a timer is that
// timeouts will be less accurate. That is, the effective timeout could
// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
// between two messages being sent may not be recognized as a timeout.
MaxProcessingTime time.Duration
// Return specifies what channels will be populated. If they are set to true,
// you must read from them to prevent deadlock.
Return struct {
// If enabled, any errors that occurred while consuming are returned on
// the Errors channel (default disabled).
Errors bool
}
// Offsets specifies configuration for how and when to commit consumed
// offsets. This currently requires the manual use of an OffsetManager
// but will eventually be automated.
Offsets struct {
// Deprecated: CommitInterval exists for historical compatibility
// and should not be used. Please use Consumer.Offsets.AutoCommit
CommitInterval time.Duration
// AutoCommit specifies configuration for committing updated offsets automatically.
AutoCommit struct {
// Whether or not to auto-commit updated offsets back to the broker.
// (default enabled).
Enable bool
// How frequently to commit updated offsets. Ineffective unless
// auto-commit is enabled (default 1s)
Interval time.Duration
}
// The initial offset to use if no offset was previously committed.
// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
Initial int64
// The retention duration for committed offsets. If zero, disabled
// (in which case the `offsets.retention.minutes` option on the
// broker will be used). Kafka only supports precision up to
// milliseconds; nanoseconds will be truncated. Requires Kafka
// broker version 0.9.0 or later.
// (default is 0: disabled).
Retention time.Duration
Retry struct {
// The total number of times to retry failing commit
// requests during OffsetManager shutdown (default 3).
Max int
}
}
// IsolationLevel supports 2 modes:
// - use `ReadUncommitted` (default) to consume and return all messages in message channel
// - use `ReadCommitted` to hide messages that are part of an aborted transaction
IsolationLevel IsolationLevel
}
// A user-provided string sent with every request to the brokers for logging,
// debugging, and auditing purposes. Defaults to "sarama", but you should
// probably set it to something specific to your application.
ClientID string
// A rack identifier for this client. This can be any string value which
// indicates where this client is physically located.
// It corresponds with the broker config 'broker.rack'
RackID string
// The number of events to buffer in internal and external channels. This
// permits the producer and consumer to continue processing some messages
// in the background while user code is working, greatly improving throughput.
// Defaults to 256.
ChannelBufferSize int
// The version of Kafka that Sarama will assume it is running against.
// Defaults to the oldest supported stable version. Since Kafka provides
// backwards-compatibility, setting it to a version older than you have
// will not break anything, although it may prevent you from using the
// latest features. Setting it to a version greater than you are actually
// running may lead to random breakage.
Version KafkaVersion
// The registry to define metrics into.
// Defaults to a local registry.
// If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
// prior to starting Sarama.
// See Examples on how to use the metrics registry
MetricRegistry metrics.Registry
}
// NewConfig returns a new configuration instance with sane defaults.
func NewConfig() *Config {
c := &Config{}
c.Admin.Retry.Max = 5
c.Admin.Retry.Backoff = 100 * time.Millisecond
c.Admin.Timeout = 3 * time.Second
c.Net.MaxOpenRequests = 5
c.Net.DialTimeout = 30 * time.Second
c.Net.ReadTimeout = 30 * time.Second
c.Net.WriteTimeout = 30 * time.Second
c.Net.SASL.Handshake = true
c.Net.SASL.Version = SASLHandshakeV0
c.Metadata.Retry.Max = 3
c.Metadata.Retry.Backoff = 250 * time.Millisecond
c.Metadata.RefreshFrequency = 10 * time.Minute
c.Metadata.Full = true
c.Producer.MaxMessageBytes = 1000000
c.Producer.RequiredAcks = WaitForLocal
c.Producer.Timeout = 10 * time.Second
c.Producer.Partitioner = NewHashPartitioner
c.Producer.Retry.Max = 3
c.Producer.Retry.Backoff = 100 * time.Millisecond
c.Producer.Return.Errors = true
c.Producer.CompressionLevel = CompressionLevelDefault
c.Consumer.Fetch.Min = 1
c.Consumer.Fetch.Default = 1024 * 1024
c.Consumer.Retry.Backoff = 2 * time.Second
c.Consumer.MaxWaitTime = 250 * time.Millisecond
c.Consumer.MaxProcessingTime = 100 * time.Millisecond
c.Consumer.Return.Errors = false
c.Consumer.Offsets.AutoCommit.Enable = true
c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
c.Consumer.Offsets.Initial = OffsetNewest
c.Consumer.Offsets.Retry.Max = 3
c.Consumer.Group.Session.Timeout = 10 * time.Second
c.Consumer.Group.Heartbeat.Interval = 3 * time.Second
c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange
c.Consumer.Group.Rebalance.Timeout = 60 * time.Second
c.Consumer.Group.Rebalance.Retry.Max = 4
c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second
c.ClientID = defaultClientID
c.ChannelBufferSize = 256
c.Version = MinVersion
c.MetricRegistry = metrics.NewRegistry()
return c
}
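// Editor's illustrative sketch, not part of the upstream file: one way a
// caller might override these defaults for a synchronous producer. The
// client ID is a placeholder; adjust it to your application.
func exampleProducerConfig() (*Config, error) {
    cfg := NewConfig()
    cfg.ClientID = "my-service"            // avoid the default "sarama"
    cfg.Producer.RequiredAcks = WaitForAll // wait for the full in-sync replica set
    cfg.Producer.Return.Successes = true   // a SyncProducer needs both Return channels enabled
    cfg.Producer.Compression = CompressionSnappy
    if err := cfg.Validate(); err != nil {
        return nil, err
    }
    return cfg, nil
}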
// Validate checks a Config instance. It will return a
// ConfigurationError if the specified values don't make sense.
func (c *Config) Validate() error {
// some configuration values should trigger a warning but not fail validation completely; do those first
if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
}
if !c.Net.SASL.Enable {
if c.Net.SASL.User != "" {
Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
}
if c.Net.SASL.Password != "" {
Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
}
}
if c.Producer.RequiredAcks > 1 {
Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
}
if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
}
if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
}
if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
}
if c.Producer.Timeout%time.Millisecond != 0 {
Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
}
if c.Consumer.MaxWaitTime < 100*time.Millisecond {
Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
}
if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
}
if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
}
if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 {
Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
}
if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 {
Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
}
if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 {
Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.")
}
if c.ClientID == defaultClientID {
Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
}
// validate Net values
switch {
case c.Net.MaxOpenRequests <= 0:
return ConfigurationError("Net.MaxOpenRequests must be > 0")
case c.Net.DialTimeout <= 0:
return ConfigurationError("Net.DialTimeout must be > 0")
case c.Net.ReadTimeout <= 0:
return ConfigurationError("Net.ReadTimeout must be > 0")
case c.Net.WriteTimeout <= 0:
return ConfigurationError("Net.WriteTimeout must be > 0")
case c.Net.SASL.Enable:
if c.Net.SASL.Mechanism == "" {
c.Net.SASL.Mechanism = SASLTypePlaintext
}
switch c.Net.SASL.Mechanism {
case SASLTypePlaintext:
if c.Net.SASL.User == "" {
return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
}
if c.Net.SASL.Password == "" {
return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
}
case SASLTypeOAuth:
if c.Net.SASL.TokenProvider == nil {
return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider")
}
case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
if c.Net.SASL.User == "" {
return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
}
if c.Net.SASL.Password == "" {
return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
}
if c.Net.SASL.SCRAMClientGeneratorFunc == nil {
return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc")
}
case SASLTypeGSSAPI:
if c.Net.SASL.GSSAPI.ServiceName == "" {
return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used")
}
if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH {
if c.Net.SASL.GSSAPI.Password == "" {
return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " +
"mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH")
}
} else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH {
if c.Net.SASL.GSSAPI.KeyTabPath == "" {
return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" +
" and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH")
}
} else {
return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH")
}
if c.Net.SASL.GSSAPI.KerberosConfigPath == "" {
return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used")
}
if c.Net.SASL.GSSAPI.Username == "" {
return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used")
}
if c.Net.SASL.GSSAPI.Realm == "" {
return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used")
}
default:
msg := fmt.Sprintf("The SASL mechanism configuration is invalid. Possible values are `%s`, `%s`, `%s`, `%s` and `%s`",
SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI)
return ConfigurationError(msg)
}
}
// validate the Admin values
switch {
case c.Admin.Timeout <= 0:
return ConfigurationError("Admin.Timeout must be > 0")
}
// validate the Metadata values
switch {
case c.Metadata.Retry.Max < 0:
return ConfigurationError("Metadata.Retry.Max must be >= 0")
case c.Metadata.Retry.Backoff < 0:
return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
case c.Metadata.RefreshFrequency < 0:
return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
}
// validate the Producer values
switch {
case c.Producer.MaxMessageBytes <= 0:
return ConfigurationError("Producer.MaxMessageBytes must be > 0")
case c.Producer.RequiredAcks < -1:
return ConfigurationError("Producer.RequiredAcks must be >= -1")
case c.Producer.Timeout <= 0:
return ConfigurationError("Producer.Timeout must be > 0")
case c.Producer.Partitioner == nil:
return ConfigurationError("Producer.Partitioner must not be nil")
case c.Producer.Flush.Bytes < 0:
return ConfigurationError("Producer.Flush.Bytes must be >= 0")
case c.Producer.Flush.Messages < 0:
return ConfigurationError("Producer.Flush.Messages must be >= 0")
case c.Producer.Flush.Frequency < 0:
return ConfigurationError("Producer.Flush.Frequency must be >= 0")
case c.Producer.Flush.MaxMessages < 0:
return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
case c.Producer.Retry.Max < 0:
return ConfigurationError("Producer.Retry.Max must be >= 0")
case c.Producer.Retry.Backoff < 0:
return ConfigurationError("Producer.Retry.Backoff must be >= 0")
}
if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
}
if c.Producer.Compression == CompressionGZIP {
if c.Producer.CompressionLevel != CompressionLevelDefault {
if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil {
return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
}
}
}
if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) {
return ConfigurationError("zstd compression requires Version >= V2_1_0_0")
}
if c.Producer.Idempotent {
if !c.Version.IsAtLeast(V0_11_0_0) {
return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
}
if c.Producer.Retry.Max == 0 {
return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1")
}
if c.Producer.RequiredAcks != WaitForAll {
return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll")
}
if c.Net.MaxOpenRequests > 1 {
return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1")
}
}
// validate the Consumer values
switch {
case c.Consumer.Fetch.Min <= 0:
return ConfigurationError("Consumer.Fetch.Min must be > 0")
case c.Consumer.Fetch.Default <= 0:
return ConfigurationError("Consumer.Fetch.Default must be > 0")
case c.Consumer.Fetch.Max < 0:
return ConfigurationError("Consumer.Fetch.Max must be >= 0")
case c.Consumer.MaxWaitTime < 1*time.Millisecond:
return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
case c.Consumer.MaxProcessingTime <= 0:
return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
case c.Consumer.Retry.Backoff < 0:
return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
case c.Consumer.Offsets.AutoCommit.Interval <= 0:
return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0")
case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
case c.Consumer.Offsets.Retry.Max < 0:
return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted:
return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted")
}
if c.Consumer.Offsets.CommitInterval != 0 {
Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" +
" and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored")
}
// validate IsolationLevel
if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) {
return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0")
}
// validate the Consumer Group values
switch {
case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond:
return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms")
case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond:
return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms")
case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout:
return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout")
case c.Consumer.Group.Rebalance.Strategy == nil:
return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty")
case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond:
return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms")
case c.Consumer.Group.Rebalance.Retry.Max < 0:
return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0")
case c.Consumer.Group.Rebalance.Retry.Backoff < 0:
return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0")
}
// validate misc shared values
switch {
case c.ChannelBufferSize < 0:
return ConfigurationError("ChannelBufferSize must be >= 0")
case !validID.MatchString(c.ClientID):
return ConfigurationError("ClientID is invalid")
}
return nil
}
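// Editor's illustrative sketch, not part of the upstream file: Validate also
// enforces the cross-field constraints above. With the defaults from
// NewConfig, enabling Idempotent alone fails because RequiredAcks is still
// WaitForLocal and Net.MaxOpenRequests is still 5.
func exampleValidateIdempotent() {
    cfg := NewConfig()
    cfg.Version = V0_11_0_0
    cfg.Producer.Idempotent = true
    if err := cfg.Validate(); err != nil {
        Logger.Println(err) // reports the first violated idempotence requirement
    }
}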
func (c *Config) getDialer() proxy.Dialer {
if c.Net.Proxy.Enable {
Logger.Printf("using proxy %s", c.Net.Proxy.Dialer)
return c.Net.Proxy.Dialer
} else {
return &net.Dialer{
Timeout: c.Net.DialTimeout,
KeepAlive: c.Net.KeepAlive,
LocalAddr: c.Net.LocalAddr,
}
}
}
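// Editor's illustrative sketch, not part of the upstream file: wiring a
// SOCKS5 dialer from golang.org/x/net/proxy into Net.Proxy (the proxy
// address is a placeholder).
func exampleProxyConfig() (*Config, error) {
    dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
    if err != nil {
        return nil, err
    }
    cfg := NewConfig()
    cfg.Net.Proxy.Enable = true
    cfg.Net.Proxy.Dialer = dialer // returned by getDialer above instead of a plain net.Dialer
    return cfg, nil
}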

vendor/github.com/Shopify/sarama/config_resource_type.go generated vendored Normal file
@ -0,0 +1,18 @@
package sarama
// ConfigResourceType is a type for resources that have configs.
type ConfigResourceType int8
// Taken from:
// https://github.com/apache/kafka/blob/ed7c071e07f1f90e4c2895582f61ca090ced3c42/clients/src/main/java/org/apache/kafka/common/config/ConfigResource.java#L32-L55
const (
// UnknownResource constant type
UnknownResource ConfigResourceType = 0
// TopicResource constant type
TopicResource ConfigResourceType = 2
// BrokerResource constant type
BrokerResource ConfigResourceType = 4
// BrokerLoggerResource constant type
BrokerLoggerResource ConfigResourceType = 8
)
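// Editor's illustrative sketch, not part of the upstream file: these
// constants are typically used through the admin client's ConfigResource,
// e.g. to read a topic's configuration. The broker address and topic name
// are placeholders, and DescribeConfigs needs a broker recent enough to
// support the API.
func exampleDescribeTopicConfig() ([]ConfigEntry, error) {
    cfg := NewConfig()
    cfg.Version = V1_0_0_0
    admin, err := NewClusterAdmin([]string{"localhost:9092"}, cfg)
    if err != nil {
        return nil, err
    }
    defer admin.Close()
    return admin.DescribeConfig(ConfigResource{Type: TopicResource, Name: "my-topic"})
}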

vendor/github.com/Shopify/sarama/consumer.go generated vendored Normal file
@ -0,0 +1,911 @@
package sarama
import (
"errors"
"fmt"
"math"
"sync"
"sync/atomic"
"time"
"github.com/rcrowley/go-metrics"
)
// ConsumerMessage encapsulates a Kafka message returned by the consumer.
type ConsumerMessage struct {
Headers []*RecordHeader // only set if kafka is version 0.11+
Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
Key, Value []byte
Topic string
Partition int32
Offset int64
}
// ConsumerError is what is provided to the user when an error occurs.
// It wraps an error and includes the topic and partition.
type ConsumerError struct {
Topic string
Partition int32
Err error
}
func (ce ConsumerError) Error() string {
return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
}
// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
// when stopping.
type ConsumerErrors []*ConsumerError
func (ce ConsumerErrors) Error() string {
return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
}
// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of
// scope.
type Consumer interface {
// Topics returns the set of available topics as retrieved from the cluster
// metadata. This method is the same as Client.Topics(), and is provided for
// convenience.
Topics() ([]string, error)
// Partitions returns the sorted list of all partition IDs for the given topic.
// This method is the same as Client.Partitions(), and is provided for convenience.
Partitions(topic string) ([]int32, error)
// ConsumePartition creates a PartitionConsumer on the given topic/partition with
// the given offset. It will return an error if this Consumer is already consuming
// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
// or OffsetOldest
ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
// HighWaterMarks returns the current high water marks for each topic and partition.
// Consistency between partitions is not guaranteed since high water marks are updated separately.
HighWaterMarks() map[string]map[int32]int64
// Close shuts down the consumer. It must be called after all child
// PartitionConsumers have already been closed.
Close() error
}
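// Editor's illustrative sketch, not part of the upstream file: enumerating a
// topic's partitions before deciding what to consume (broker address and
// topic are placeholders).
func exampleListPartitions() ([]int32, error) {
    c, err := NewConsumer([]string{"localhost:9092"}, NewConfig())
    if err != nil {
        return nil, err
    }
    defer c.Close()
    return c.Partitions("my-topic")
}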
type consumer struct {
conf *Config
children map[string]map[int32]*partitionConsumer
brokerConsumers map[*Broker]*brokerConsumer
client Client
lock sync.Mutex
}
// NewConsumer creates a new consumer using the given broker addresses and configuration.
func NewConsumer(addrs []string, config *Config) (Consumer, error) {
client, err := NewClient(addrs, config)
if err != nil {
return nil, err
}
return newConsumer(client)
}
// NewConsumerFromClient creates a new consumer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this consumer.
func NewConsumerFromClient(client Client) (Consumer, error) {
// For a client passed in by the caller, ensure we don't
// call Close() on it.
cli := &nopCloserClient{client}
return newConsumer(cli)
}
func newConsumer(client Client) (Consumer, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
c := &consumer{
client: client,
conf: client.Config(),
children: make(map[string]map[int32]*partitionConsumer),
brokerConsumers: make(map[*Broker]*brokerConsumer),
}
return c, nil
}
func (c *consumer) Close() error {
return c.client.Close()
}
func (c *consumer) Topics() ([]string, error) {
return c.client.Topics()
}
func (c *consumer) Partitions(topic string) ([]int32, error) {
return c.client.Partitions(topic)
}
func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
child := &partitionConsumer{
consumer: c,
conf: c.conf,
topic: topic,
partition: partition,
messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
feeder: make(chan *FetchResponse, 1),
trigger: make(chan none, 1),
dying: make(chan none),
fetchSize: c.conf.Consumer.Fetch.Default,
}
if err := child.chooseStartingOffset(offset); err != nil {
return nil, err
}
var leader *Broker
var err error
if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
return nil, err
}
if err := c.addChild(child); err != nil {
return nil, err
}
go withRecover(child.dispatcher)
go withRecover(child.responseFeeder)
child.broker = c.refBrokerConsumer(leader)
child.broker.input <- child
return child, nil
}
func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
c.lock.Lock()
defer c.lock.Unlock()
hwms := make(map[string]map[int32]int64)
for topic, p := range c.children {
hwm := make(map[int32]int64, len(p))
for partition, pc := range p {
hwm[partition] = pc.HighWaterMarkOffset()
}
hwms[topic] = hwm
}
return hwms
}
func (c *consumer) addChild(child *partitionConsumer) error {
c.lock.Lock()
defer c.lock.Unlock()
topicChildren := c.children[child.topic]
if topicChildren == nil {
topicChildren = make(map[int32]*partitionConsumer)
c.children[child.topic] = topicChildren
}
if topicChildren[child.partition] != nil {
return ConfigurationError("That topic/partition is already being consumed")
}
topicChildren[child.partition] = child
return nil
}
func (c *consumer) removeChild(child *partitionConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.children[child.topic], child.partition)
}
func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
c.lock.Lock()
defer c.lock.Unlock()
bc := c.brokerConsumers[broker]
if bc == nil {
bc = c.newBrokerConsumer(broker)
c.brokerConsumers[broker] = bc
}
bc.refs++
return bc
}
func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
brokerWorker.refs--
if brokerWorker.refs == 0 {
close(brokerWorker.input)
if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
delete(c.brokerConsumers, brokerWorker.broker)
}
}
}
func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.brokerConsumers, brokerWorker.broker)
}
// PartitionConsumer
// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
// of scope.
//
// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
//
// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
type PartitionConsumer interface {
// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
// function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
// this before calling Close on the underlying client.
AsyncClose()
// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
// the Messages channel when this function is called, you will be competing with Close for messages; consider
// calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
Close() error
// Messages returns the read channel for the messages that are returned by
// the broker.
Messages() <-chan *ConsumerMessage
// Errors returns a read channel of errors that occurred during consuming, if
// enabled. By default, errors are logged and not returned over this channel.
// If you want to implement any custom error handling, set your config's
// Consumer.Return.Errors setting to true, and read from this channel.
Errors() <-chan *ConsumerError
// HighWaterMarkOffset returns the high water mark offset of the partition,
// i.e. the offset that will be used for the next message that will be produced.
// You can use this to determine how far behind the processing is.
HighWaterMarkOffset() int64
}
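// Editor's illustrative sketch, not part of the upstream file: the for/range
// pattern described above. AsyncClose starts the teardown, and the loop keeps
// servicing Messages until the channel is closed (broker address, topic,
// partition and target offset are placeholders).
func examplePartitionConsume(target int64) error {
    c, err := NewConsumer([]string{"localhost:9092"}, NewConfig())
    if err != nil {
        return err
    }
    defer c.Close()
    pc, err := c.ConsumePartition("my-topic", 0, OffsetOldest)
    if err != nil {
        return err
    }
    for msg := range pc.Messages() {
        Logger.Printf("consumed message at offset %d", msg.Offset)
        if msg.Offset >= target {
            pc.AsyncClose() // the range loop ends once teardown closes Messages
        }
    }
    return nil
}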
type partitionConsumer struct {
highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
consumer *consumer
conf *Config
broker *brokerConsumer
messages chan *ConsumerMessage
errors chan *ConsumerError
feeder chan *FetchResponse
trigger, dying chan none
closeOnce sync.Once
topic string
partition int32
responseResult error
fetchSize int32
offset int64
retries int32
}
var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
func (child *partitionConsumer) sendError(err error) {
cErr := &ConsumerError{
Topic: child.topic,
Partition: child.partition,
Err: err,
}
if child.conf.Consumer.Return.Errors {
child.errors <- cErr
} else {
Logger.Println(cErr)
}
}
func (child *partitionConsumer) computeBackoff() time.Duration {
if child.conf.Consumer.Retry.BackoffFunc != nil {
retries := atomic.AddInt32(&child.retries, 1)
return child.conf.Consumer.Retry.BackoffFunc(int(retries))
}
return child.conf.Consumer.Retry.Backoff
}
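// Editor's illustrative sketch, not part of the upstream file: a capped
// exponential Consumer.Retry.BackoffFunc matching the signature used by
// computeBackoff above (the base and cap are arbitrary example values).
func exampleConsumerBackoff(retries int) time.Duration {
    const maxBackoff = 2 * time.Second
    if retries > 4 {
        return maxBackoff // also guards the shift below against overflow
    }
    backoff := time.Duration(1<<uint(retries)) * 100 * time.Millisecond
    if backoff > maxBackoff {
        backoff = maxBackoff
    }
    return backoff
}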
func (child *partitionConsumer) dispatcher() {
for range child.trigger {
select {
case <-child.dying:
close(child.trigger)
case <-time.After(child.computeBackoff()):
if child.broker != nil {
child.consumer.unrefBrokerConsumer(child.broker)
child.broker = nil
}
Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
if err := child.dispatch(); err != nil {
child.sendError(err)
child.trigger <- none{}
}
}
}
if child.broker != nil {
child.consumer.unrefBrokerConsumer(child.broker)
}
child.consumer.removeChild(child)
close(child.feeder)
}
func (child *partitionConsumer) dispatch() error {
if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
return err
}
var leader *Broker
var err error
if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
return err
}
child.broker = child.consumer.refBrokerConsumer(leader)
child.broker.input <- child
return nil
}
func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
if err != nil {
return err
}
oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
if err != nil {
return err
}
switch {
case offset == OffsetNewest:
child.offset = newestOffset
case offset == OffsetOldest:
child.offset = oldestOffset
case offset >= oldestOffset && offset <= newestOffset:
child.offset = offset
default:
return ErrOffsetOutOfRange
}
return nil
}
func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
return child.messages
}
func (child *partitionConsumer) Errors() <-chan *ConsumerError {
return child.errors
}
func (child *partitionConsumer) AsyncClose() {
// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
// 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
// also just close itself)
child.closeOnce.Do(func() {
close(child.dying)
})
}
func (child *partitionConsumer) Close() error {
child.AsyncClose()
var errors ConsumerErrors
for err := range child.errors {
errors = append(errors, err)
}
if len(errors) > 0 {
return errors
}
return nil
}
func (child *partitionConsumer) HighWaterMarkOffset() int64 {
return atomic.LoadInt64(&child.highWaterMarkOffset)
}
func (child *partitionConsumer) responseFeeder() {
var msgs []*ConsumerMessage
expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
firstAttempt := true
feederLoop:
for response := range child.feeder {
msgs, child.responseResult = child.parseResponse(response)
if child.responseResult == nil {
atomic.StoreInt32(&child.retries, 0)
}
for i, msg := range msgs {
messageSelect:
select {
case <-child.dying:
child.broker.acks.Done()
continue feederLoop
case child.messages <- msg:
firstAttempt = true
case <-expiryTicker.C:
if !firstAttempt {
child.responseResult = errTimedOut
child.broker.acks.Done()
remainingLoop:
for _, msg = range msgs[i:] {
select {
case child.messages <- msg:
case <-child.dying:
break remainingLoop
}
}
child.broker.input <- child
continue feederLoop
} else {
// current message has not been sent, return to select
// statement
firstAttempt = false
goto messageSelect
}
}
}
child.broker.acks.Done()
}
expiryTicker.Stop()
close(child.messages)
close(child.errors)
}
func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
var messages []*ConsumerMessage
for _, msgBlock := range msgSet.Messages {
for _, msg := range msgBlock.Messages() {
offset := msg.Offset
timestamp := msg.Msg.Timestamp
if msg.Msg.Version >= 1 {
baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
offset += baseOffset
if msg.Msg.LogAppendTime {
timestamp = msgBlock.Msg.Timestamp
}
}
if offset < child.offset {
continue
}
messages = append(messages, &ConsumerMessage{
Topic: child.topic,
Partition: child.partition,
Key: msg.Msg.Key,
Value: msg.Msg.Value,
Offset: offset,
Timestamp: timestamp,
BlockTimestamp: msgBlock.Msg.Timestamp,
})
child.offset = offset + 1
}
}
if len(messages) == 0 {
child.offset++
}
return messages, nil
}
func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
messages := make([]*ConsumerMessage, 0, len(batch.Records))
for _, rec := range batch.Records {
offset := batch.FirstOffset + rec.OffsetDelta
if offset < child.offset {
continue
}
timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta)
if batch.LogAppendTime {
timestamp = batch.MaxTimestamp
}
messages = append(messages, &ConsumerMessage{
Topic: child.topic,
Partition: child.partition,
Key: rec.Key,
Value: rec.Value,
Offset: offset,
Timestamp: timestamp,
Headers: rec.Headers,
})
child.offset = offset + 1
}
if len(messages) == 0 {
child.offset++
}
return messages, nil
}
func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
var (
metricRegistry = child.conf.MetricRegistry
consumerBatchSizeMetric metrics.Histogram
)
if metricRegistry != nil {
consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry)
}
// If the request was throttled and empty, we log and return without error
if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 {
Logger.Printf(
"consumer/broker/%d FetchResponse throttled %v\n",
child.broker.broker.ID(), response.ThrottleTime)
return nil, nil
}
block := response.GetBlock(child.topic, child.partition)
if block == nil {
return nil, ErrIncompleteResponse
}
if block.Err != ErrNoError {
return nil, block.Err
}
nRecs, err := block.numRecords()
if err != nil {
return nil, err
}
consumerBatchSizeMetric.Update(int64(nRecs))
if nRecs == 0 {
partialTrailingMessage, err := block.isPartial()
if err != nil {
return nil, err
}
// We got no messages. If we got a trailing one then we need to ask for more data.
// Otherwise we just poll again and wait for one to be produced...
if partialTrailingMessage {
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
// we can't ask for more data, we've hit the configured limit
child.sendError(ErrMessageTooLarge)
child.offset++ // skip this one so we can keep processing future messages
} else {
child.fetchSize *= 2
// check int32 overflow
if child.fetchSize < 0 {
child.fetchSize = math.MaxInt32
}
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
child.fetchSize = child.conf.Consumer.Fetch.Max
}
}
}
return nil, nil
}
// we got messages, reset our fetch size in case it was increased for a previous request
child.fetchSize = child.conf.Consumer.Fetch.Default
atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
// abortedProducerIDs contains the producer IDs whose messages should be ignored as uncommitted:
// - a producer ID is added when the partitionConsumer iterates over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset)
// - a producer ID is removed when the partitionConsumer iterates over an aborted controlRecord, meaning the aborted transaction for this producer is over
abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
abortedTransactions := block.getAbortedTransactions()
messages := []*ConsumerMessage{}
for _, records := range block.RecordsSet {
switch records.recordsType {
case legacyRecords:
messageSetMessages, err := child.parseMessages(records.MsgSet)
if err != nil {
return nil, err
}
messages = append(messages, messageSetMessages...)
case defaultRecords:
// Consume remaining abortedTransaction up to last offset of current batch
for _, txn := range abortedTransactions {
if txn.FirstOffset > records.RecordBatch.LastOffset() {
break
}
abortedProducerIDs[txn.ProducerID] = struct{}{}
// Pop abortedTransactions so that we never add it again
abortedTransactions = abortedTransactions[1:]
}
recordBatchMessages, err := child.parseRecords(records.RecordBatch)
if err != nil {
return nil, err
}
// Parse and commit offset but do not expose messages that are:
// - control records
// - part of an aborted transaction when set to `ReadCommitted`
// control record
isControl, err := records.isControl()
if err != nil {
// It is unclear why the original code continued on error here. The safe
// bet is to ignore control messages under ReadUncommitted, and to fail
// on the error under ReadCommitted.
if child.conf.Consumer.IsolationLevel == ReadCommitted {
return nil, err
}
continue
}
if isControl {
controlRecord, err := records.getControlRecord()
if err != nil {
return nil, err
}
if controlRecord.Type == ControlRecordAbort {
delete(abortedProducerIDs, records.RecordBatch.ProducerID)
}
continue
}
// filter aborted transactions
if child.conf.Consumer.IsolationLevel == ReadCommitted {
_, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID]
if records.RecordBatch.IsTransactional && isAborted {
continue
}
}
messages = append(messages, recordBatchMessages...)
default:
return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
}
}
return messages, nil
}
type brokerConsumer struct {
consumer *consumer
broker *Broker
input chan *partitionConsumer
newSubscriptions chan []*partitionConsumer
subscriptions map[*partitionConsumer]none
wait chan none
acks sync.WaitGroup
refs int
}
func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
bc := &brokerConsumer{
consumer: c,
broker: broker,
input: make(chan *partitionConsumer),
newSubscriptions: make(chan []*partitionConsumer),
wait: make(chan none),
subscriptions: make(map[*partitionConsumer]none),
refs: 0,
}
go withRecover(bc.subscriptionManager)
go withRecover(bc.subscriptionConsumer)
return bc
}
// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
// so the main goroutine can block waiting for work if it has none.
func (bc *brokerConsumer) subscriptionManager() {
var buffer []*partitionConsumer
for {
if len(buffer) > 0 {
select {
case event, ok := <-bc.input:
if !ok {
goto done
}
buffer = append(buffer, event)
case bc.newSubscriptions <- buffer:
buffer = nil
case bc.wait <- none{}:
}
} else {
select {
case event, ok := <-bc.input:
if !ok {
goto done
}
buffer = append(buffer, event)
case bc.newSubscriptions <- nil:
}
}
}
done:
close(bc.wait)
if len(buffer) > 0 {
bc.newSubscriptions <- buffer
}
close(bc.newSubscriptions)
}
// subscriptionConsumer ensures we will get nil right away if no new subscriptions are available
func (bc *brokerConsumer) subscriptionConsumer() {
<-bc.wait // wait for our first piece of work
for newSubscriptions := range bc.newSubscriptions {
bc.updateSubscriptions(newSubscriptions)
if len(bc.subscriptions) == 0 {
// We're about to be shut down or we're about to receive more subscriptions.
// Either way, the signal just hasn't propagated to our goroutine yet.
<-bc.wait
continue
}
response, err := bc.fetchNewMessages()
if err != nil {
Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
bc.abort(err)
return
}
bc.acks.Add(len(bc.subscriptions))
for child := range bc.subscriptions {
child.feeder <- response
}
bc.acks.Wait()
bc.handleResponses()
}
}
func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
for _, child := range newSubscriptions {
bc.subscriptions[child] = none{}
Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
}
for child := range bc.subscriptions {
select {
case <-child.dying:
Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
close(child.trigger)
delete(bc.subscriptions, child)
default:
// no-op
}
}
}
// handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
func (bc *brokerConsumer) handleResponses() {
for child := range bc.subscriptions {
result := child.responseResult
child.responseResult = nil
switch result {
case nil:
// no-op
case errTimedOut:
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
bc.broker.ID(), child.topic, child.partition)
delete(bc.subscriptions, child)
case ErrOffsetOutOfRange:
// there's no point in retrying this; it will just fail the same way again
// shut it down and force the user to choose what to do
child.sendError(result)
Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
close(child.trigger)
delete(bc.subscriptions, child)
case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
// not an error, but does need redispatching
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
bc.broker.ID(), child.topic, child.partition, result)
child.trigger <- none{}
delete(bc.subscriptions, child)
default:
// dunno, tell the user and try redispatching
child.sendError(result)
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
bc.broker.ID(), child.topic, child.partition, result)
child.trigger <- none{}
delete(bc.subscriptions, child)
}
}
}
func (bc *brokerConsumer) abort(err error) {
bc.consumer.abandonBrokerConsumer(bc)
_ = bc.broker.Close() // we don't care about the error this might return, we already have one
for child := range bc.subscriptions {
child.sendError(err)
child.trigger <- none{}
}
for newSubscriptions := range bc.newSubscriptions {
if len(newSubscriptions) == 0 {
<-bc.wait
continue
}
for _, child := range newSubscriptions {
child.sendError(err)
child.trigger <- none{}
}
}
}
func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
request := &FetchRequest{
MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
}
if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) {
request.Version = 1
}
if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
request.Version = 2
}
if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
request.Version = 3
request.MaxBytes = MaxResponseSize
}
if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
request.Version = 4
request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
}
if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) {
request.Version = 7
// We do not currently implement KIP-227 FetchSessions. Setting the id to 0
// and the epoch to -1 tells the broker not to generate a session ID that
// we're going to just ignore anyway.
request.SessionID = 0
request.SessionEpoch = -1
}
if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) {
request.Version = 10
}
if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) {
request.Version = 11
request.RackID = bc.consumer.conf.RackID
}
for child := range bc.subscriptions {
request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
}
return bc.broker.Fetch(request)
}

vendor/github.com/Shopify/sarama/consumer_group.go generated vendored Normal file
@ -0,0 +1,867 @@
package sarama
import (
"context"
"errors"
"fmt"
"sort"
"sync"
"time"
)
// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed.
var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed")
// ConsumerGroup is responsible for dividing up processing of topics and partitions
// over a collection of processes (the members of the consumer group).
type ConsumerGroup interface {
// Consume joins a cluster of consumers for a given list of topics and
// starts a blocking ConsumerGroupSession through the ConsumerGroupHandler.
//
// The life-cycle of a session is represented by the following steps:
//
// 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers)
// and are assigned their "fair share" of partitions, aka 'claims'.
// 2. Before processing starts, the handler's Setup() hook is called to notify the user
// of the claims and allow any necessary preparation or alteration of state.
// 3. For each of the assigned claims the handler's ConsumeClaim() function is then called
// in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected
// from concurrent reads/writes.
// 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the
// parent context is cancelled or when a server-side rebalance cycle is initiated.
// 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called
// to allow the user to perform any final tasks before a rebalance.
// 6. Finally, marked offsets are committed one last time before claims are released.
//
// Please note that once a rebalance is triggered, sessions must be completed within
// Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit
// as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
// is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
// commit failures.
// This method should be called inside an infinite loop; when a
// server-side rebalance happens, the consumer session will need to be
// recreated to get the new claims.
Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
// Errors returns a read channel of errors that occurred during the consumer life-cycle.
// By default, errors are logged and not returned over this channel.
// If you want to implement any custom error handling, set your config's
// Consumer.Return.Errors setting to true, and read from this channel.
Errors() <-chan error
// Close stops the ConsumerGroup and detaches any running sessions. It is required to call
// this function before the object passes out of scope, as it will otherwise leak memory.
Close() error
}
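// Editor's illustrative sketch, not part of the upstream file: a minimal
// ConsumerGroupHandler plus the infinite Consume loop the comment above calls
// for (broker address, group ID and topic are placeholders).
type exampleGroupHandler struct{}

func (exampleGroupHandler) Setup(ConsumerGroupSession) error   { return nil }
func (exampleGroupHandler) Cleanup(ConsumerGroupSession) error { return nil }
func (exampleGroupHandler) ConsumeClaim(sess ConsumerGroupSession, claim ConsumerGroupClaim) error {
    for msg := range claim.Messages() {
        Logger.Printf("%s/%d consumed offset %d", msg.Topic, msg.Partition, msg.Offset)
        sess.MarkMessage(msg, "") // mark for the next offset commit
    }
    return nil
}

func exampleGroupConsume(ctx context.Context) error {
    cfg := NewConfig()
    cfg.Version = V0_10_2_0 // consumer groups require at least this version
    group, err := NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
    if err != nil {
        return err
    }
    defer group.Close()
    for ctx.Err() == nil {
        // Consume blocks for one session; looping recreates the session
        // after each server-side rebalance, as documented above.
        if err := group.Consume(ctx, []string{"my-topic"}, exampleGroupHandler{}); err != nil {
            return err
        }
    }
    return ctx.Err()
}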
type consumerGroup struct {
client Client
config *Config
consumer Consumer
groupID string
memberID string
errors chan error
lock sync.Mutex
closed chan none
closeOnce sync.Once
userData []byte
}
// NewConsumerGroup creates a new consumer group with the given broker addresses and configuration.
func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) {
client, err := NewClient(addrs, config)
if err != nil {
return nil, err
}
c, err := newConsumerGroup(groupID, client)
if err != nil {
_ = client.Close()
}
return c, err
}
// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this consumer.
// PLEASE NOTE: consumer groups can only re-use but not share clients.
func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
// For a client passed in by the caller, ensure we don't
// call Close() on it.
cli := &nopCloserClient{client}
return newConsumerGroup(groupID, cli)
}
func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) {
config := client.Config()
if !config.Version.IsAtLeast(V0_10_2_0) {
return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
}
consumer, err := NewConsumerFromClient(client)
if err != nil {
return nil, err
}
return &consumerGroup{
client: client,
consumer: consumer,
config: config,
groupID: groupID,
errors: make(chan error, config.ChannelBufferSize),
closed: make(chan none),
}, nil
}
// Errors implements ConsumerGroup.
func (c *consumerGroup) Errors() <-chan error { return c.errors }
// Close implements ConsumerGroup.
func (c *consumerGroup) Close() (err error) {
c.closeOnce.Do(func() {
close(c.closed)
// leave group
if e := c.leave(); e != nil {
err = e
}
// drain errors
go func() {
close(c.errors)
}()
for e := range c.errors {
err = e
}
if e := c.client.Close(); e != nil {
err = e
}
})
return
}
// Consume implements ConsumerGroup.
func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error {
// Ensure group is not closed
select {
case <-c.closed:
return ErrClosedConsumerGroup
default:
}
c.lock.Lock()
defer c.lock.Unlock()
// Quick exit when no topics are provided
if len(topics) == 0 {
return fmt.Errorf("no topics provided")
}
// Refresh metadata for requested topics
if err := c.client.RefreshMetadata(topics...); err != nil {
return err
}
// Init session
sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
if err == ErrClosedClient {
return ErrClosedConsumerGroup
} else if err != nil {
return err
}
// Loop to check whether the number of partitions of any subscribed topic has changed;
// any change triggers a rebalance. The goroutine is tied to this session, so calling
// Consume again does not accumulate extra loopCheckPartitionNumbers goroutines.
go c.loopCheckPartitionNumbers(topics, sess)
// Wait for session exit signal
<-sess.ctx.Done()
// Gracefully release session claims
return sess.release(true)
}
func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) {
select {
case <-c.closed:
return nil, ErrClosedConsumerGroup
case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
}
if refreshCoordinator {
err := c.client.RefreshCoordinator(c.groupID)
if err != nil {
return c.retryNewSession(ctx, topics, handler, retries, true)
}
}
return c.newSession(ctx, topics, handler, retries-1)
}
func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
coordinator, err := c.client.Coordinator(c.groupID)
if err != nil {
if retries <= 0 {
return nil, err
}
return c.retryNewSession(ctx, topics, handler, retries, true)
}
// Join consumer group
join, err := c.joinGroupRequest(coordinator, topics)
if err != nil {
_ = coordinator.Close()
return nil, err
}
switch join.Err {
case ErrNoError:
c.memberID = join.MemberId
case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
c.memberID = ""
return c.newSession(ctx, topics, handler, retries)
case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
if retries <= 0 {
return nil, join.Err
}
return c.retryNewSession(ctx, topics, handler, retries, true)
case ErrRebalanceInProgress: // retry after backoff
if retries <= 0 {
return nil, join.Err
}
return c.retryNewSession(ctx, topics, handler, retries, false)
default:
return nil, join.Err
}
// Prepare distribution plan if we joined as the leader
var plan BalanceStrategyPlan
if join.LeaderId == join.MemberId {
members, err := join.GetMembers()
if err != nil {
return nil, err
}
plan, err = c.balance(members)
if err != nil {
return nil, err
}
}
// Sync consumer group
sync, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
if err != nil {
_ = coordinator.Close()
return nil, err
}
switch sync.Err {
case ErrNoError:
case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
c.memberID = ""
return c.newSession(ctx, topics, handler, retries)
case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
if retries <= 0 {
return nil, sync.Err
}
return c.retryNewSession(ctx, topics, handler, retries, true)
case ErrRebalanceInProgress: // retry after backoff
if retries <= 0 {
return nil, sync.Err
}
return c.retryNewSession(ctx, topics, handler, retries, false)
default:
return nil, sync.Err
}
// Retrieve and sort claims
var claims map[string][]int32
if len(sync.MemberAssignment) > 0 {
members, err := sync.GetMemberAssignment()
if err != nil {
return nil, err
}
claims = members.Topics
c.userData = members.UserData
for _, partitions := range claims {
sort.Sort(int32Slice(partitions))
}
}
return newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler)
}
func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) {
req := &JoinGroupRequest{
GroupId: c.groupID,
MemberId: c.memberID,
SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond),
ProtocolType: "consumer",
}
if c.config.Version.IsAtLeast(V0_10_1_0) {
req.Version = 1
req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond)
}
// use static user-data if configured, otherwise use consumer-group userdata from the last sync
userData := c.config.Consumer.Group.Member.UserData
if len(userData) == 0 {
userData = c.userData
}
meta := &ConsumerGroupMemberMetadata{
Topics: topics,
UserData: userData,
}
strategy := c.config.Consumer.Group.Rebalance.Strategy
if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
return nil, err
}
return coordinator.JoinGroup(req)
}
func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrategyPlan, generationID int32) (*SyncGroupResponse, error) {
req := &SyncGroupRequest{
GroupId: c.groupID,
MemberId: c.memberID,
GenerationId: generationID,
}
strategy := c.config.Consumer.Group.Rebalance.Strategy
for memberID, topics := range plan {
assignment := &ConsumerGroupMemberAssignment{Topics: topics}
userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID)
if err != nil {
return nil, err
}
assignment.UserData = userDataBytes
if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil {
return nil, err
}
}
return coordinator.SyncGroup(req)
}
func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) {
req := &HeartbeatRequest{
GroupId: c.groupID,
MemberId: memberID,
GenerationId: generationID,
}
return coordinator.Heartbeat(req)
}
func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) {
topics := make(map[string][]int32)
for _, meta := range members {
for _, topic := range meta.Topics {
topics[topic] = nil
}
}
for topic := range topics {
partitions, err := c.client.Partitions(topic)
if err != nil {
return nil, err
}
topics[topic] = partitions
}
strategy := c.config.Consumer.Group.Rebalance.Strategy
return strategy.Plan(members, topics)
}
// Leaves the cluster, called by Close.
func (c *consumerGroup) leave() error {
c.lock.Lock()
defer c.lock.Unlock()
if c.memberID == "" {
return nil
}
coordinator, err := c.client.Coordinator(c.groupID)
if err != nil {
return err
}
resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{
GroupId: c.groupID,
MemberId: c.memberID,
})
if err != nil {
_ = coordinator.Close()
return err
}
// Unset memberID
c.memberID = ""
// Check response
switch resp.Err {
case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError:
return nil
default:
return resp.Err
}
}
func (c *consumerGroup) handleError(err error, topic string, partition int32) {
if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 {
err = &ConsumerError{
Topic: topic,
Partition: partition,
Err: err,
}
}
if !c.config.Consumer.Return.Errors {
Logger.Println(err)
return
}
select {
case <-c.closed:
// consumer is closed
return
default:
}
select {
case c.errors <- err:
default:
// no error listener
}
}
func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) {
pause := time.NewTicker(c.config.Metadata.RefreshFrequency)
defer session.cancel()
defer pause.Stop()
var oldTopicToPartitionNum map[string]int
var err error
if oldTopicToPartitionNum, err = c.topicToPartitionNumbers(topics); err != nil {
return
}
for {
if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil {
return
} else {
for topic, num := range oldTopicToPartitionNum {
if newTopicToPartitionNum[topic] != num {
return // trigger the end of the session on exit
}
}
}
select {
case <-pause.C:
case <-session.ctx.Done():
Logger.Printf("loop check partition number coroutine will exit, topics %s", topics)
// exit if the session was closed elsewhere
return
case <-c.closed:
return
}
}
}
func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) {
topicToPartitionNum := make(map[string]int, len(topics))
for _, topic := range topics {
if partitionNum, err := c.client.Partitions(topic); err != nil {
Logger.Printf("Consumer Group topic %s get partition number failed %v", topic, err)
return nil, err
} else {
topicToPartitionNum[topic] = len(partitionNum)
}
}
return topicToPartitionNum, nil
}
// --------------------------------------------------------------------
// ConsumerGroupSession represents a consumer group member session.
type ConsumerGroupSession interface {
// Claims returns information about the claimed partitions by topic.
Claims() map[string][]int32
// MemberID returns the cluster member ID.
MemberID() string
// GenerationID returns the current generation ID.
GenerationID() int32
// MarkOffset marks the provided offset, alongside a metadata string
// that represents the state of the partition consumer at that point in time. The
// metadata string can be used by another consumer to restore that state, so it
// can resume consumption.
//
// To follow upstream conventions, you are expected to mark the offset of the
// next message to read, not the last message read. Thus, when calling `MarkOffset`
// you should typically add one to the offset of the last consumed message.
//
// Note: calling MarkOffset does not necessarily commit the offset to the backend
// store immediately for efficiency reasons, and it may never be committed if
// your application crashes. This means that you may end up processing the same
// message twice, and your processing should ideally be idempotent.
MarkOffset(topic string, partition int32, offset int64, metadata string)
// ResetOffset resets to the provided offset, alongside a metadata string that
// represents the state of the partition consumer at that point in time. Reset
// acts as a counterpart to MarkOffset, the difference being that it allows
// resetting an offset to an earlier or smaller value, whereas MarkOffset only
// allows advancing the offset. See MarkOffset for more details.
ResetOffset(topic string, partition int32, offset int64, metadata string)
// MarkMessage marks a message as consumed.
MarkMessage(msg *ConsumerMessage, metadata string)
// Context returns the session context.
Context() context.Context
}
type consumerGroupSession struct {
parent *consumerGroup
memberID string
generationID int32
handler ConsumerGroupHandler
claims map[string][]int32
offsets *offsetManager
ctx context.Context
cancel func()
waitGroup sync.WaitGroup
releaseOnce sync.Once
hbDying, hbDead chan none
}
func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
// init offset manager
offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client)
if err != nil {
return nil, err
}
// init context
ctx, cancel := context.WithCancel(ctx)
// init session
sess := &consumerGroupSession{
parent: parent,
memberID: memberID,
generationID: generationID,
handler: handler,
offsets: offsets,
claims: claims,
ctx: ctx,
cancel: cancel,
hbDying: make(chan none),
hbDead: make(chan none),
}
// start heartbeat loop
go sess.heartbeatLoop()
// create a POM for each claim
for topic, partitions := range claims {
for _, partition := range partitions {
pom, err := offsets.ManagePartition(topic, partition)
if err != nil {
_ = sess.release(false)
return nil, err
}
// handle POM errors
go func(topic string, partition int32) {
for err := range pom.Errors() {
sess.parent.handleError(err, topic, partition)
}
}(topic, partition)
}
}
// perform setup
if err := handler.Setup(sess); err != nil {
_ = sess.release(true)
return nil, err
}
// start consuming
for topic, partitions := range claims {
for _, partition := range partitions {
sess.waitGroup.Add(1)
go func(topic string, partition int32) {
defer sess.waitGroup.Done()
// cancel the session as soon as the first
// goroutine exits
defer sess.cancel()
// consume a single topic/partition, blocking
sess.consume(topic, partition)
}(topic, partition)
}
}
return sess, nil
}
func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims }
func (s *consumerGroupSession) MemberID() string { return s.memberID }
func (s *consumerGroupSession) GenerationID() int32 { return s.generationID }
func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
if pom := s.offsets.findPOM(topic, partition); pom != nil {
pom.MarkOffset(offset, metadata)
}
}
func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
if pom := s.offsets.findPOM(topic, partition); pom != nil {
pom.ResetOffset(offset, metadata)
}
}
func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) {
s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata)
}
func (s *consumerGroupSession) Context() context.Context {
return s.ctx
}
func (s *consumerGroupSession) consume(topic string, partition int32) {
// quick exit if rebalance is due
select {
case <-s.ctx.Done():
return
case <-s.parent.closed:
return
default:
}
// get next offset
offset := s.parent.config.Consumer.Offsets.Initial
if pom := s.offsets.findPOM(topic, partition); pom != nil {
offset, _ = pom.NextOffset()
}
// create new claim
claim, err := newConsumerGroupClaim(s, topic, partition, offset)
if err != nil {
s.parent.handleError(err, topic, partition)
return
}
// handle errors
go func() {
for err := range claim.Errors() {
s.parent.handleError(err, topic, partition)
}
}()
// trigger close when session is done
go func() {
select {
case <-s.ctx.Done():
case <-s.parent.closed:
}
claim.AsyncClose()
}()
// start processing
if err := s.handler.ConsumeClaim(s, claim); err != nil {
s.parent.handleError(err, topic, partition)
}
// ensure consumer is closed & drained
claim.AsyncClose()
for _, err := range claim.waitClosed() {
s.parent.handleError(err, topic, partition)
}
}
func (s *consumerGroupSession) release(withCleanup bool) (err error) {
// signal release, stop heartbeat
s.cancel()
// wait for consumers to exit
s.waitGroup.Wait()
// perform release
s.releaseOnce.Do(func() {
if withCleanup {
if e := s.handler.Cleanup(s); e != nil {
s.parent.handleError(e, "", -1)
err = e
}
}
if e := s.offsets.Close(); e != nil {
err = e
}
close(s.hbDying)
<-s.hbDead
})
return
}
func (s *consumerGroupSession) heartbeatLoop() {
defer close(s.hbDead)
defer s.cancel() // trigger the end of the session on exit
pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval)
defer pause.Stop()
retries := s.parent.config.Metadata.Retry.Max
for {
coordinator, err := s.parent.client.Coordinator(s.parent.groupID)
if err != nil {
if retries <= 0 {
s.parent.handleError(err, "", -1)
return
}
select {
case <-s.hbDying:
return
case <-time.After(s.parent.config.Metadata.Retry.Backoff):
retries--
}
continue
}
resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID)
if err != nil {
_ = coordinator.Close()
if retries <= 0 {
s.parent.handleError(err, "", -1)
return
}
retries--
continue
}
switch resp.Err {
case ErrNoError:
retries = s.parent.config.Metadata.Retry.Max
case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration:
return
default:
s.parent.handleError(resp.Err, "", -1)
return
}
select {
case <-pause.C:
case <-s.hbDying:
return
}
}
}
// --------------------------------------------------------------------
// ConsumerGroupHandler instances are used to handle individual topic/partition claims.
// The interface also provides hooks for the consumer group session life-cycle, allowing
// you to trigger logic before or after the consume loop(s).
//
// PLEASE NOTE that handlers are likely to be called from several goroutines concurrently,
// so ensure that all state is safely protected against race conditions.
type ConsumerGroupHandler interface {
// Setup is run at the beginning of a new session, before ConsumeClaim.
Setup(ConsumerGroupSession) error
// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
// but before the offsets are committed for the very last time.
Cleanup(ConsumerGroupSession) error
// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
// Once the Messages() channel is closed, the Handler must finish its processing
// loop and exit.
ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error
}
// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group.
type ConsumerGroupClaim interface {
// Topic returns the consumed topic name.
Topic() string
// Partition returns the consumed partition.
Partition() int32
// InitialOffset returns the initial offset that was used as a starting point for this claim.
InitialOffset() int64
// HighWaterMarkOffset returns the high water mark offset of the partition,
// i.e. the offset that will be used for the next message that will be produced.
// You can use this to determine how far behind the processing is.
HighWaterMarkOffset() int64
// Messages returns the read channel for the messages that are returned by
// the broker. The messages channel will be closed when a new rebalance cycle
// is due. You must finish processing and mark offsets within
// Config.Consumer.Group.Session.Timeout before the topic/partition is eventually
// re-assigned to another group member.
Messages() <-chan *ConsumerMessage
}
type consumerGroupClaim struct {
topic string
partition int32
offset int64
PartitionConsumer
}
func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) {
pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset)
if err == ErrOffsetOutOfRange {
offset = sess.parent.config.Consumer.Offsets.Initial
pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset)
}
if err != nil {
return nil, err
}
go func() {
for err := range pcm.Errors() {
sess.parent.handleError(err, topic, partition)
}
}()
return &consumerGroupClaim{
topic: topic,
partition: partition,
offset: offset,
PartitionConsumer: pcm,
}, nil
}
func (c *consumerGroupClaim) Topic() string { return c.topic }
func (c *consumerGroupClaim) Partition() int32 { return c.partition }
func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset }
// Drains messages and errors, ensures the claim is fully closed.
func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) {
go func() {
for range c.Messages() {
}
}()
for err := range c.Errors() {
errs = append(errs, err)
}
return
}
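
The interfaces above are the public surface a consuming application uses. For reference, a minimal usage sketch against them; broker address, group ID, and topic name are placeholder values, not part of this diff:

package main

import (
	"context"
	"log"

	"github.com/Shopify/sarama"
)

// exampleHandler is a hypothetical ConsumerGroupHandler: Setup and Cleanup
// bracket each session, ConsumeClaim runs once per claimed partition.
type exampleHandler struct{}

func (exampleHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (exampleHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (exampleHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() { // channel closes when a rebalance is due
		log.Printf("topic=%s partition=%d offset=%d", msg.Topic, msg.Partition, msg.Offset)
		sess.MarkMessage(msg, "") // marks msg.Offset+1, per the MarkOffset convention
	}
	return nil
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_10_2_0 // consumer groups require at least V0_10_2_0
	cfg.Consumer.Return.Errors = true

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()

	go func() { // drain Errors() when Consumer.Return.Errors is enabled
		for err := range group.Errors() {
			log.Println("group error:", err)
		}
	}()

	for {
		// Consume returns whenever the session ends (e.g. on rebalance);
		// call it again to rejoin the group.
		if err := group.Consume(context.Background(), []string{"example-topic"}, exampleHandler{}); err != nil {
			log.Fatal(err)
		}
	}
}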


@ -0,0 +1,96 @@
package sarama
// ConsumerGroupMemberMetadata holds the metadata for a consumer group member
type ConsumerGroupMemberMetadata struct {
Version int16
Topics []string
UserData []byte
}
func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
pe.putInt16(m.Version)
if err := pe.putStringArray(m.Topics); err != nil {
return err
}
if err := pe.putBytes(m.UserData); err != nil {
return err
}
return nil
}
func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
if m.Version, err = pd.getInt16(); err != nil {
return
}
if m.Topics, err = pd.getStringArray(); err != nil {
return
}
if m.UserData, err = pd.getBytes(); err != nil {
return
}
return nil
}
// ConsumerGroupMemberAssignment holds the member assignment for a consumer group
type ConsumerGroupMemberAssignment struct {
Version int16
Topics map[string][]int32
UserData []byte
}
func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
pe.putInt16(m.Version)
if err := pe.putArrayLength(len(m.Topics)); err != nil {
return err
}
for topic, partitions := range m.Topics {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putInt32Array(partitions); err != nil {
return err
}
}
if err := pe.putBytes(m.UserData); err != nil {
return err
}
return nil
}
func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
if m.Version, err = pd.getInt16(); err != nil {
return
}
var topicLen int
if topicLen, err = pd.getArrayLength(); err != nil {
return
}
m.Topics = make(map[string][]int32, topicLen)
for i := 0; i < topicLen; i++ {
var topic string
if topic, err = pd.getString(); err != nil {
return
}
if m.Topics[topic], err = pd.getInt32Array(); err != nil {
return
}
}
if m.UserData, err = pd.getBytes(); err != nil {
return
}
return nil
}


@ -0,0 +1,38 @@
package sarama
// ConsumerMetadataRequest is used for consumer group metadata requests
type ConsumerMetadataRequest struct {
ConsumerGroup string
}
func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
tmp := new(FindCoordinatorRequest)
tmp.CoordinatorKey = r.ConsumerGroup
tmp.CoordinatorType = CoordinatorGroup
return tmp.encode(pe)
}
func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
tmp := new(FindCoordinatorRequest)
if err := tmp.decode(pd, version); err != nil {
return err
}
r.ConsumerGroup = tmp.CoordinatorKey
return nil
}
func (r *ConsumerMetadataRequest) key() int16 {
return 10
}
func (r *ConsumerMetadataRequest) version() int16 {
return 0
}
func (r *ConsumerMetadataRequest) headerVersion() int16 {
return 1
}
func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
return V0_8_2_0
}


@ -0,0 +1,82 @@
package sarama
import (
"net"
"strconv"
)
// ConsumerMetadataResponse holds the response for a consumer group metadata request
type ConsumerMetadataResponse struct {
Err KError
Coordinator *Broker
CoordinatorID int32 // deprecated: use Coordinator.ID()
CoordinatorHost string // deprecated: use Coordinator.Addr()
CoordinatorPort int32 // deprecated: use Coordinator.Addr()
}
func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
tmp := new(FindCoordinatorResponse)
if err := tmp.decode(pd, version); err != nil {
return err
}
r.Err = tmp.Err
r.Coordinator = tmp.Coordinator
if tmp.Coordinator == nil {
return nil
}
// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
// backwards compatibility
host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
if err != nil {
return err
}
port, err := strconv.ParseInt(portstr, 10, 32)
if err != nil {
return err
}
r.CoordinatorID = r.Coordinator.ID()
r.CoordinatorHost = host
r.CoordinatorPort = int32(port)
return nil
}
func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
if r.Coordinator == nil {
r.Coordinator = new(Broker)
r.Coordinator.id = r.CoordinatorID
r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
}
tmp := &FindCoordinatorResponse{
Version: 0,
Err: r.Err,
Coordinator: r.Coordinator,
}
if err := tmp.encode(pe); err != nil {
return err
}
return nil
}
func (r *ConsumerMetadataResponse) key() int16 {
return 10
}
func (r *ConsumerMetadataResponse) version() int16 {
return 0
}
func (r *ConsumerMetadataResponse) headerVersion() int16 {
return 0
}
func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
return V0_8_2_0
}
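
ConsumerMetadataRequest/Response are thin wrappers over FindCoordinator. At the client level the same lookup is exposed through Client.Coordinator; a small sketch, reusing the imports from the consumer example above (broker address and group ID are placeholders):

cfg := sarama.NewConfig()
client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
if err != nil {
	log.Fatal(err)
}
defer client.Close()

// Coordinator issues a coordinator lookup and caches the resulting broker.
coordinator, err := client.Coordinator("example-group")
if err != nil {
	log.Fatal(err)
}
log.Printf("coordinator for example-group: %s (id %d)", coordinator.Addr(), coordinator.ID())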

vendor/github.com/Shopify/sarama/control_record.go

@ -0,0 +1,72 @@
package sarama
// ControlRecordType indicates the type of a control record.
type ControlRecordType int
const (
// ControlRecordAbort is a control record for abort
ControlRecordAbort ControlRecordType = iota
// ControlRecordCommit is a control record for commit
ControlRecordCommit
// ControlRecordUnknown is a control record of unknown type
ControlRecordUnknown
)
// ControlRecord is a record returned by fetchRequest. However, unlike "normal"
// records, control records mean nothing application-wise; they only serve
// internal logic for supporting transactions.
type ControlRecord struct {
Version int16
CoordinatorEpoch int32
Type ControlRecordType
}
func (cr *ControlRecord) decode(key, value packetDecoder) error {
var err error
cr.Version, err = value.getInt16()
if err != nil {
return err
}
cr.CoordinatorEpoch, err = value.getInt32()
if err != nil {
return err
}
// There is a version for the value part AND the key part, and it is unclear
// whether the two are supposed to match. Either way, all of these versions can only be 0 for now.
cr.Version, err = key.getInt16()
if err != nil {
return err
}
recordType, err := key.getInt16()
if err != nil {
return err
}
switch recordType {
case 0:
cr.Type = ControlRecordAbort
case 1:
cr.Type = ControlRecordCommit
default:
// from the Java implementation:
// UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored
cr.Type = ControlRecordUnknown
}
return nil
}
func (cr *ControlRecord) encode(key, value packetEncoder) {
value.putInt16(cr.Version)
value.putInt32(cr.CoordinatorEpoch)
key.putInt16(cr.Version)
switch cr.Type {
case ControlRecordAbort:
key.putInt16(0)
case ControlRecordCommit:
key.putInt16(1)
}
}

vendor/github.com/Shopify/sarama/crc32_field.go

@ -0,0 +1,86 @@
package sarama
import (
"encoding/binary"
"fmt"
"hash/crc32"
"sync"
)
type crcPolynomial int8
const (
crcIEEE crcPolynomial = iota
crcCastagnoli
)
var crc32FieldPool = sync.Pool{}
func acquireCrc32Field(polynomial crcPolynomial) *crc32Field {
val := crc32FieldPool.Get()
if val != nil {
c := val.(*crc32Field)
c.polynomial = polynomial
return c
}
return newCRC32Field(polynomial)
}
func releaseCrc32Field(c *crc32Field) {
crc32FieldPool.Put(c)
}
var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
type crc32Field struct {
startOffset int
polynomial crcPolynomial
}
func (c *crc32Field) saveOffset(in int) {
c.startOffset = in
}
func (c *crc32Field) reserveLength() int {
return 4
}
func newCRC32Field(polynomial crcPolynomial) *crc32Field {
return &crc32Field{polynomial: polynomial}
}
func (c *crc32Field) run(curOffset int, buf []byte) error {
crc, err := c.crc(curOffset, buf)
if err != nil {
return err
}
binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
return nil
}
func (c *crc32Field) check(curOffset int, buf []byte) error {
crc, err := c.crc(curOffset, buf)
if err != nil {
return err
}
expected := binary.BigEndian.Uint32(buf[c.startOffset:])
if crc != expected {
return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)}
}
return nil
}
func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) {
var tab *crc32.Table
switch c.polynomial {
case crcIEEE:
tab = crc32.IEEETable
case crcCastagnoli:
tab = castagnoliTable
default:
return 0, PacketDecodingError{"invalid CRC type"}
}
return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil
}
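
The two polynomials correspond to the checksums Kafka uses on the wire: the older message-set format checksums with IEEE, while the v2 record-batch format uses Castagnoli (CRC-32C). A standard-library sketch of the difference:

package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	data := []byte("kafka record batch")
	// crcIEEE above maps to crc32.IEEETable; crcCastagnoli to a Castagnoli table.
	fmt.Printf("IEEE:       %#x\n", crc32.ChecksumIEEE(data))
	fmt.Printf("Castagnoli: %#x\n", crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli)))
}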


@ -0,0 +1,125 @@
package sarama
import "time"
type CreatePartitionsRequest struct {
TopicPartitions map[string]*TopicPartition
Timeout time.Duration
ValidateOnly bool
}
func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
return err
}
for topic, partition := range c.TopicPartitions {
if err := pe.putString(topic); err != nil {
return err
}
if err := partition.encode(pe); err != nil {
return err
}
}
pe.putInt32(int32(c.Timeout / time.Millisecond))
pe.putBool(c.ValidateOnly)
return nil
}
func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicPartitions = make(map[string]*TopicPartition, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicPartitions[topic] = new(TopicPartition)
if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
return err
}
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
c.Timeout = time.Duration(timeout) * time.Millisecond
if c.ValidateOnly, err = pd.getBool(); err != nil {
return err
}
return nil
}
func (r *CreatePartitionsRequest) key() int16 {
return 37
}
func (r *CreatePartitionsRequest) version() int16 {
return 0
}
func (r *CreatePartitionsRequest) headerVersion() int16 {
return 1
}
func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
return V1_0_0_0
}
type TopicPartition struct {
Count int32
Assignment [][]int32
}
func (t *TopicPartition) encode(pe packetEncoder) error {
pe.putInt32(t.Count)
if len(t.Assignment) == 0 {
pe.putInt32(-1)
return nil
}
if err := pe.putArrayLength(len(t.Assignment)); err != nil {
return err
}
for _, assign := range t.Assignment {
if err := pe.putInt32Array(assign); err != nil {
return err
}
}
return nil
}
func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
if t.Count, err = pd.getInt32(); err != nil {
return err
}
n, err := pd.getInt32()
if err != nil {
return err
}
if n <= 0 {
return nil
}
t.Assignment = make([][]int32, n)
for i := 0; i < int(n); i++ {
if t.Assignment[i], err = pd.getInt32Array(); err != nil {
return err
}
}
return nil
}
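
This request is driven by the ClusterAdmin API in the vendored admin.go. A hedged sketch of growing a topic's partition count; broker address and topic name are placeholders:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V1_0_0_0 // CreatePartitions requires at least V1_0_0_0

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	// Grow "example-topic" to 6 partitions; a nil assignment lets the broker
	// place the new replicas, and false means apply rather than validate-only.
	if err := admin.CreatePartitions("example-topic", 6, nil, false); err != nil {
		log.Fatal(err)
	}
}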


@ -0,0 +1,109 @@
package sarama
import (
"fmt"
"time"
)
type CreatePartitionsResponse struct {
ThrottleTime time.Duration
TopicPartitionErrors map[string]*TopicPartitionError
}
func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
return err
}
for topic, partitionError := range c.TopicPartitionErrors {
if err := pe.putString(topic); err != nil {
return err
}
if err := partitionError.encode(pe); err != nil {
return err
}
}
return nil
}
func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicPartitionErrors[topic] = new(TopicPartitionError)
if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (r *CreatePartitionsResponse) key() int16 {
return 37
}
func (r *CreatePartitionsResponse) version() int16 {
return 0
}
func (r *CreatePartitionsResponse) headerVersion() int16 {
return 0
}
func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
return V1_0_0_0
}
type TopicPartitionError struct {
Err KError
ErrMsg *string
}
func (t *TopicPartitionError) Error() string {
text := t.Err.Error()
if t.ErrMsg != nil {
text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
}
return text
}
func (t *TopicPartitionError) encode(pe packetEncoder) error {
pe.putInt16(int16(t.Err))
if err := pe.putNullableString(t.ErrMsg); err != nil {
return err
}
return nil
}
func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
t.Err = KError(kerr)
if t.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
return nil
}


@ -0,0 +1,178 @@
package sarama
import (
"time"
)
type CreateTopicsRequest struct {
Version int16
TopicDetails map[string]*TopicDetail
Timeout time.Duration
ValidateOnly bool
}
func (c *CreateTopicsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(c.TopicDetails)); err != nil {
return err
}
for topic, detail := range c.TopicDetails {
if err := pe.putString(topic); err != nil {
return err
}
if err := detail.encode(pe); err != nil {
return err
}
}
pe.putInt32(int32(c.Timeout / time.Millisecond))
if c.Version >= 1 {
pe.putBool(c.ValidateOnly)
}
return nil
}
func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicDetails = make(map[string]*TopicDetail, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicDetails[topic] = new(TopicDetail)
if err = c.TopicDetails[topic].decode(pd, version); err != nil {
return err
}
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
c.Timeout = time.Duration(timeout) * time.Millisecond
if version >= 1 {
c.ValidateOnly, err = pd.getBool()
if err != nil {
return err
}
c.Version = version
}
return nil
}
func (c *CreateTopicsRequest) key() int16 {
return 19
}
func (c *CreateTopicsRequest) version() int16 {
return c.Version
}
func (r *CreateTopicsRequest) headerVersion() int16 {
return 1
}
func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
switch c.Version {
case 2:
return V1_0_0_0
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}
type TopicDetail struct {
NumPartitions int32
ReplicationFactor int16
ReplicaAssignment map[int32][]int32
ConfigEntries map[string]*string
}
func (t *TopicDetail) encode(pe packetEncoder) error {
pe.putInt32(t.NumPartitions)
pe.putInt16(t.ReplicationFactor)
if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil {
return err
}
for partition, assignment := range t.ReplicaAssignment {
pe.putInt32(partition)
if err := pe.putInt32Array(assignment); err != nil {
return err
}
}
if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil {
return err
}
for configKey, configValue := range t.ConfigEntries {
if err := pe.putString(configKey); err != nil {
return err
}
if err := pe.putNullableString(configValue); err != nil {
return err
}
}
return nil
}
func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) {
if t.NumPartitions, err = pd.getInt32(); err != nil {
return err
}
if t.ReplicationFactor, err = pd.getInt16(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.ReplicaAssignment = make(map[int32][]int32, n)
for i := 0; i < n; i++ {
replica, err := pd.getInt32()
if err != nil {
return err
}
if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil {
return err
}
}
}
n, err = pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.ConfigEntries = make(map[string]*string, n)
for i := 0; i < n; i++ {
configKey, err := pd.getString()
if err != nil {
return err
}
if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
return err
}
}
}
return nil
}
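
The matching admin helper is ClusterAdmin.CreateTopic, which takes the TopicDetail defined above. A short sketch reusing the admin setup from the CreatePartitions example; topic name and settings are placeholders:

retention := "86400000" // retention.ms takes a string value in the config API
detail := &sarama.TopicDetail{
	NumPartitions:     3,
	ReplicationFactor: 1,
	ConfigEntries:     map[string]*string{"retention.ms": &retention},
}
// false = actually create the topic; true would only validate the request.
if err := admin.CreateTopic("example-topic", detail, false); err != nil {
	log.Fatal(err)
}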


@ -0,0 +1,127 @@
package sarama
import (
"fmt"
"time"
)
type CreateTopicsResponse struct {
Version int16
ThrottleTime time.Duration
TopicErrors map[string]*TopicError
}
func (c *CreateTopicsResponse) encode(pe packetEncoder) error {
if c.Version >= 2 {
pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
}
if err := pe.putArrayLength(len(c.TopicErrors)); err != nil {
return err
}
for topic, topicError := range c.TopicErrors {
if err := pe.putString(topic); err != nil {
return err
}
if err := topicError.encode(pe, c.Version); err != nil {
return err
}
}
return nil
}
func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
c.Version = version
if version >= 2 {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicErrors = make(map[string]*TopicError, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicErrors[topic] = new(TopicError)
if err := c.TopicErrors[topic].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (c *CreateTopicsResponse) key() int16 {
return 19
}
func (c *CreateTopicsResponse) version() int16 {
return c.Version
}
func (c *CreateTopicsResponse) headerVersion() int16 {
return 0
}
func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
switch c.Version {
case 2:
return V1_0_0_0
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}
type TopicError struct {
Err KError
ErrMsg *string
}
func (t *TopicError) Error() string {
text := t.Err.Error()
if t.ErrMsg != nil {
text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
}
return text
}
func (t *TopicError) encode(pe packetEncoder, version int16) error {
pe.putInt16(int16(t.Err))
if version >= 1 {
if err := pe.putNullableString(t.ErrMsg); err != nil {
return err
}
}
return nil
}
func (t *TopicError) decode(pd packetDecoder, version int16) (err error) {
kErr, err := pd.getInt16()
if err != nil {
return err
}
t.Err = KError(kErr)
if version >= 1 {
if t.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
}
return nil
}

vendor/github.com/Shopify/sarama/decompress.go

@ -0,0 +1,63 @@
package sarama
import (
"bytes"
"compress/gzip"
"fmt"
"io/ioutil"
"sync"
snappy "github.com/eapache/go-xerial-snappy"
"github.com/pierrec/lz4"
)
var (
lz4ReaderPool = sync.Pool{
New: func() interface{} {
return lz4.NewReader(nil)
},
}
gzipReaderPool sync.Pool
)
func decompress(cc CompressionCodec, data []byte) ([]byte, error) {
switch cc {
case CompressionNone:
return data, nil
case CompressionGZIP:
var (
err error
reader *gzip.Reader
readerIntf = gzipReaderPool.Get()
)
if readerIntf != nil {
reader = readerIntf.(*gzip.Reader)
} else {
reader, err = gzip.NewReader(bytes.NewReader(data))
if err != nil {
return nil, err
}
}
defer gzipReaderPool.Put(reader)
if err := reader.Reset(bytes.NewReader(data)); err != nil {
return nil, err
}
return ioutil.ReadAll(reader)
case CompressionSnappy:
return snappy.Decode(data)
case CompressionLZ4:
reader := lz4ReaderPool.Get().(*lz4.Reader)
defer lz4ReaderPool.Put(reader)
reader.Reset(bytes.NewReader(data))
return ioutil.ReadAll(reader)
case CompressionZSTD:
return zstdDecompress(nil, data)
default:
return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)}
}
}
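
decompress is the consumer-side half: the consumer reads the codec from each record batch and inflates transparently. On the producer side the codec is chosen via the config; a minimal sketch:

cfg := sarama.NewConfig()
cfg.Version = sarama.V0_10_0_0
cfg.Producer.Compression = sarama.CompressionLZ4 // LZ4 requires Version >= V0_10_0_0
// For gzip, a level can also be set:
// cfg.Producer.Compression = sarama.CompressionGZIP
// cfg.Producer.CompressionLevel = 6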


@ -0,0 +1,34 @@
package sarama
type DeleteGroupsRequest struct {
Groups []string
}
func (r *DeleteGroupsRequest) encode(pe packetEncoder) error {
return pe.putStringArray(r.Groups)
}
func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
r.Groups, err = pd.getStringArray()
return
}
func (r *DeleteGroupsRequest) key() int16 {
return 42
}
func (r *DeleteGroupsRequest) version() int16 {
return 0
}
func (r *DeleteGroupsRequest) headerVersion() int16 {
return 1
}
func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
return V1_1_0_0
}
func (r *DeleteGroupsRequest) AddGroup(group string) {
r.Groups = append(r.Groups, group)
}
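
The corresponding admin helper is ClusterAdmin.DeleteConsumerGroup. A sketch reusing the admin setup from the CreatePartitions example; note the group must have no active members, and DeleteGroups requires Version >= V1_1_0_0:

if err := admin.DeleteConsumerGroup("example-group"); err != nil {
	log.Fatal(err)
}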


@ -0,0 +1,74 @@
package sarama
import (
"time"
)
type DeleteGroupsResponse struct {
ThrottleTime time.Duration
GroupErrorCodes map[string]KError
}
func (r *DeleteGroupsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil {
return err
}
for groupID, errorCode := range r.GroupErrorCodes {
if err := pe.putString(groupID); err != nil {
return err
}
pe.putInt16(int16(errorCode))
}
return nil
}
func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.GroupErrorCodes = make(map[string]KError, n)
for i := 0; i < n; i++ {
groupID, err := pd.getString()
if err != nil {
return err
}
errorCode, err := pd.getInt16()
if err != nil {
return err
}
r.GroupErrorCodes[groupID] = KError(errorCode)
}
return nil
}
func (r *DeleteGroupsResponse) key() int16 {
return 42
}
func (r *DeleteGroupsResponse) version() int16 {
return 0
}
func (r *DeleteGroupsResponse) headerVersion() int16 {
return 0
}
func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
return V1_1_0_0
}


@ -0,0 +1,130 @@
package sarama
import (
"sort"
"time"
)
// request message format is:
// [topic] timeout(int32)
// where topic is:
// name(string) [partition]
// where partition is:
// id(int32) offset(int64)
type DeleteRecordsRequest struct {
Topics map[string]*DeleteRecordsRequestTopic
Timeout time.Duration
}
func (d *DeleteRecordsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(d.Topics)); err != nil {
return err
}
keys := make([]string, 0, len(d.Topics))
for topic := range d.Topics {
keys = append(keys, topic)
}
sort.Strings(keys)
for _, topic := range keys {
if err := pe.putString(topic); err != nil {
return err
}
if err := d.Topics[topic].encode(pe); err != nil {
return err
}
}
pe.putInt32(int32(d.Timeout / time.Millisecond))
return nil
}
func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error {
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
d.Topics = make(map[string]*DeleteRecordsRequestTopic, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
details := new(DeleteRecordsRequestTopic)
if err = details.decode(pd, version); err != nil {
return err
}
d.Topics[topic] = details
}
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
d.Timeout = time.Duration(timeout) * time.Millisecond
return nil
}
func (d *DeleteRecordsRequest) key() int16 {
return 21
}
func (d *DeleteRecordsRequest) version() int16 {
return 0
}
func (d *DeleteRecordsRequest) headerVersion() int16 {
return 1
}
func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type DeleteRecordsRequestTopic struct {
PartitionOffsets map[int32]int64 // partition => offset
}
func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil {
return err
}
keys := make([]int32, 0, len(t.PartitionOffsets))
for partition := range t.PartitionOffsets {
keys = append(keys, partition)
}
sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
for _, partition := range keys {
pe.putInt32(partition)
pe.putInt64(t.PartitionOffsets[partition])
}
return nil
}
func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error {
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.PartitionOffsets = make(map[int32]int64, n)
for i := 0; i < n; i++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
offset, err := pd.getInt64()
if err != nil {
return err
}
t.PartitionOffsets[partition] = offset
}
}
return nil
}
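
This is exposed through ClusterAdmin.DeleteRecords, which truncates partitions up to the given offsets. A sketch reusing the admin setup above; DeleteRecords requires Version >= V0_11_0_0:

// Delete everything in partition 0 of "example-topic" below offset 1000;
// messages at or after offset 1000 are kept.
if err := admin.DeleteRecords("example-topic", map[int32]int64{0: 1000}); err != nil {
	log.Fatal(err)
}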


@ -0,0 +1,162 @@
package sarama
import (
"sort"
"time"
)
// response message format is:
// throttleMs(int32) [topic]
// where topic is:
// name(string) [partition]
// where partition is:
// id(int32) low_watermark(int64) error_code(int16)
type DeleteRecordsResponse struct {
Version int16
ThrottleTime time.Duration
Topics map[string]*DeleteRecordsResponseTopic
}
func (d *DeleteRecordsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(d.Topics)); err != nil {
return err
}
keys := make([]string, 0, len(d.Topics))
for topic := range d.Topics {
keys = append(keys, topic)
}
sort.Strings(keys)
for _, topic := range keys {
if err := pe.putString(topic); err != nil {
return err
}
if err := d.Topics[topic].encode(pe); err != nil {
return err
}
}
return nil
}
func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error {
d.Version = version
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
d.Topics = make(map[string]*DeleteRecordsResponseTopic, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
details := new(DeleteRecordsResponseTopic)
if err = details.decode(pd, version); err != nil {
return err
}
d.Topics[topic] = details
}
}
return nil
}
func (d *DeleteRecordsResponse) key() int16 {
return 21
}
func (d *DeleteRecordsResponse) version() int16 {
return 0
}
func (d *DeleteRecordsResponse) headerVersion() int16 {
return 0
}
func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type DeleteRecordsResponseTopic struct {
Partitions map[int32]*DeleteRecordsResponsePartition
}
func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(t.Partitions)); err != nil {
return err
}
keys := make([]int32, 0, len(t.Partitions))
for partition := range t.Partitions {
keys = append(keys, partition)
}
sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
for _, partition := range keys {
pe.putInt32(partition)
if err := t.Partitions[partition].encode(pe); err != nil {
return err
}
}
return nil
}
func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error {
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n)
for i := 0; i < n; i++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
details := new(DeleteRecordsResponsePartition)
if err = details.decode(pd, version); err != nil {
return err
}
t.Partitions[partition] = details
}
}
return nil
}
type DeleteRecordsResponsePartition struct {
LowWatermark int64
Err KError
}
func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error {
pe.putInt64(t.LowWatermark)
pe.putInt16(int16(t.Err))
return nil
}
func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error {
lowWatermark, err := pd.getInt64()
if err != nil {
return err
}
t.LowWatermark = lowWatermark
kErr, err := pd.getInt16()
if err != nil {
return err
}
t.Err = KError(kErr)
return nil
}


@ -0,0 +1,52 @@
package sarama
import "time"
type DeleteTopicsRequest struct {
Version int16
Topics []string
Timeout time.Duration
}
func (d *DeleteTopicsRequest) encode(pe packetEncoder) error {
if err := pe.putStringArray(d.Topics); err != nil {
return err
}
pe.putInt32(int32(d.Timeout / time.Millisecond))
return nil
}
func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
if d.Topics, err = pd.getStringArray(); err != nil {
return err
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
d.Timeout = time.Duration(timeout) * time.Millisecond
d.Version = version
return nil
}
func (d *DeleteTopicsRequest) key() int16 {
return 20
}
func (d *DeleteTopicsRequest) version() int16 {
return d.Version
}
func (d *DeleteTopicsRequest) headerVersion() int16 {
return 1
}
func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
switch d.Version {
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}
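
The admin-level counterpart is ClusterAdmin.DeleteTopic. A sketch reusing the admin setup above; deletion also requires delete.topic.enable=true on the brokers:

if err := admin.DeleteTopic("example-topic"); err != nil {
	log.Fatal(err)
}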


@ -0,0 +1,82 @@
package sarama
import "time"
type DeleteTopicsResponse struct {
Version int16
ThrottleTime time.Duration
TopicErrorCodes map[string]KError
}
func (d *DeleteTopicsResponse) encode(pe packetEncoder) error {
if d.Version >= 1 {
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
}
if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil {
return err
}
for topic, errorCode := range d.TopicErrorCodes {
if err := pe.putString(topic); err != nil {
return err
}
pe.putInt16(int16(errorCode))
}
return nil
}
func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
if version >= 1 {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
d.Version = version
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.TopicErrorCodes = make(map[string]KError, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
errorCode, err := pd.getInt16()
if err != nil {
return err
}
d.TopicErrorCodes[topic] = KError(errorCode)
}
return nil
}
func (d *DeleteTopicsResponse) key() int16 {
return 20
}
func (d *DeleteTopicsResponse) version() int16 {
return d.Version
}
func (d *DeleteTopicsResponse) headerVersion() int16 {
return 0
}
func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
switch d.Version {
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}


@ -0,0 +1,116 @@
package sarama
type DescribeConfigsRequest struct {
Version int16
Resources []*ConfigResource
IncludeSynonyms bool
}
type ConfigResource struct {
Type ConfigResourceType
Name string
ConfigNames []string
}
func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Resources)); err != nil {
return err
}
for _, c := range r.Resources {
pe.putInt8(int8(c.Type))
if err := pe.putString(c.Name); err != nil {
return err
}
if len(c.ConfigNames) == 0 {
pe.putInt32(-1)
continue
}
if err := pe.putStringArray(c.ConfigNames); err != nil {
return err
}
}
if r.Version >= 1 {
pe.putBool(r.IncludeSynonyms)
}
return nil
}
func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Resources = make([]*ConfigResource, n)
for i := 0; i < n; i++ {
r.Resources[i] = &ConfigResource{}
t, err := pd.getInt8()
if err != nil {
return err
}
r.Resources[i].Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
r.Resources[i].Name = name
confLength, err := pd.getArrayLength()
if err != nil {
return err
}
if confLength == -1 {
continue
}
cfnames := make([]string, confLength)
for i := 0; i < confLength; i++ {
s, err := pd.getString()
if err != nil {
return err
}
cfnames[i] = s
}
r.Resources[i].ConfigNames = cfnames
}
r.Version = version
if r.Version >= 1 {
b, err := pd.getBool()
if err != nil {
return err
}
r.IncludeSynonyms = b
}
return nil
}
func (r *DescribeConfigsRequest) key() int16 {
return 32
}
func (r *DescribeConfigsRequest) version() int16 {
return r.Version
}
func (r *DescribeConfigsRequest) headerVersion() int16 {
return 1
}
func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V1_1_0_0
case 2:
return V2_0_0_0
default:
return V0_11_0_0
}
}
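
This is queried via ClusterAdmin.DescribeConfig with a ConfigResource (the ConfigResourceType constants live in the vendored config_resource_type.go). A sketch reusing the admin setup above; topic name is a placeholder:

entries, err := admin.DescribeConfig(sarama.ConfigResource{
	Type: sarama.TopicResource,
	Name: "example-topic",
})
if err != nil {
	log.Fatal(err)
}
for _, e := range entries {
	log.Printf("%s=%s (source=%s, sensitive=%t)", e.Name, e.Value, e.Source, e.Sensitive)
}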


@ -0,0 +1,327 @@
package sarama
import (
"fmt"
"time"
)
type ConfigSource int8
func (s ConfigSource) String() string {
switch s {
case SourceUnknown:
return "Unknown"
case SourceTopic:
return "Topic"
case SourceDynamicBroker:
return "DynamicBroker"
case SourceDynamicDefaultBroker:
return "DynamicDefaultBroker"
case SourceStaticBroker:
return "StaticBroker"
case SourceDefault:
return "Default"
}
return fmt.Sprintf("Source Invalid: %d", int(s))
}
const (
SourceUnknown ConfigSource = iota
SourceTopic
SourceDynamicBroker
SourceDynamicDefaultBroker
SourceStaticBroker
SourceDefault
)
type DescribeConfigsResponse struct {
Version int16
ThrottleTime time.Duration
Resources []*ResourceResponse
}
type ResourceResponse struct {
ErrorCode int16
ErrorMsg string
Type ConfigResourceType
Name string
Configs []*ConfigEntry
}
type ConfigEntry struct {
Name string
Value string
ReadOnly bool
Default bool
Source ConfigSource
Sensitive bool
Synonyms []*ConfigSynonym
}
type ConfigSynonym struct {
ConfigName string
ConfigValue string
Source ConfigSource
}
func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
if err = pe.putArrayLength(len(r.Resources)); err != nil {
return err
}
for _, c := range r.Resources {
if err = c.encode(pe, r.Version); err != nil {
return err
}
}
return nil
}
func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Resources = make([]*ResourceResponse, n)
for i := 0; i < n; i++ {
rr := &ResourceResponse{}
if err := rr.decode(pd, version); err != nil {
return err
}
r.Resources[i] = rr
}
return nil
}
func (r *DescribeConfigsResponse) key() int16 {
return 32
}
func (r *DescribeConfigsResponse) version() int16 {
return r.Version
}
func (r *DescribeConfigsResponse) headerVersion() int16 {
return 0
}
func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V1_0_0_0
case 2:
return V2_0_0_0
default:
return V0_11_0_0
}
}
func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(r.ErrorCode)
if err = pe.putString(r.ErrorMsg); err != nil {
return err
}
pe.putInt8(int8(r.Type))
if err = pe.putString(r.Name); err != nil {
return err
}
if err = pe.putArrayLength(len(r.Configs)); err != nil {
return err
}
for _, c := range r.Configs {
if err = c.encode(pe, version); err != nil {
return err
}
}
return nil
}
func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
ec, err := pd.getInt16()
if err != nil {
return err
}
r.ErrorCode = ec
em, err := pd.getString()
if err != nil {
return err
}
r.ErrorMsg = em
t, err := pd.getInt8()
if err != nil {
return err
}
r.Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
r.Name = name
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Configs = make([]*ConfigEntry, n)
for i := 0; i < n; i++ {
c := &ConfigEntry{}
if err := c.decode(pd, version); err != nil {
return err
}
r.Configs[i] = c
}
return nil
}
func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) {
if err = pe.putString(r.Name); err != nil {
return err
}
if err = pe.putString(r.Value); err != nil {
return err
}
pe.putBool(r.ReadOnly)
if version <= 0 {
pe.putBool(r.Default)
pe.putBool(r.Sensitive)
} else {
pe.putInt8(int8(r.Source))
pe.putBool(r.Sensitive)
if err := pe.putArrayLength(len(r.Synonyms)); err != nil {
return err
}
for _, c := range r.Synonyms {
if err = c.encode(pe, version); err != nil {
return err
}
}
}
return nil
}
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
if version == 0 {
r.Source = SourceUnknown
}
name, err := pd.getString()
if err != nil {
return err
}
r.Name = name
value, err := pd.getString()
if err != nil {
return err
}
r.Value = value
read, err := pd.getBool()
if err != nil {
return err
}
r.ReadOnly = read
if version == 0 {
defaultB, err := pd.getBool()
if err != nil {
return err
}
r.Default = defaultB
if defaultB {
r.Source = SourceDefault
}
} else {
source, err := pd.getInt8()
if err != nil {
return err
}
r.Source = ConfigSource(source)
r.Default = r.Source == SourceDefault
}
sensitive, err := pd.getBool()
if err != nil {
return err
}
r.Sensitive = sensitive
if version > 0 {
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Synonyms = make([]*ConfigSynonym, n)
for i := 0; i < n; i++ {
s := &ConfigSynonym{}
if err := s.decode(pd, version); err != nil {
return err
}
r.Synonyms[i] = s
}
}
return nil
}
func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) {
err = pe.putString(c.ConfigName)
if err != nil {
return err
}
err = pe.putString(c.ConfigValue)
if err != nil {
return err
}
pe.putInt8(int8(c.Source))
return nil
}
func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error {
name, err := pd.getString()
if err != nil {
return nil
}
c.ConfigName = name
value, err := pd.getString()
if err != nil {
return nil
}
c.ConfigValue = value
source, err := pd.getInt8()
if err != nil {
return nil
}
c.Source = ConfigSource(source)
return nil
}


@ -0,0 +1,34 @@
package sarama
type DescribeGroupsRequest struct {
Groups []string
}
func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
return pe.putStringArray(r.Groups)
}
func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
r.Groups, err = pd.getStringArray()
return
}
func (r *DescribeGroupsRequest) key() int16 {
return 15
}
func (r *DescribeGroupsRequest) version() int16 {
return 0
}
func (r *DescribeGroupsRequest) headerVersion() int16 {
return 1
}
func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}
func (r *DescribeGroupsRequest) AddGroup(group string) {
r.Groups = append(r.Groups, group)
}
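
This is surfaced through ClusterAdmin.DescribeConsumerGroups, which returns the GroupDescription structs defined in the response file below. A sketch reusing the admin setup above:

groups, err := admin.DescribeConsumerGroups([]string{"example-group"})
if err != nil {
	log.Fatal(err)
}
for _, g := range groups {
	log.Printf("group=%s state=%s protocol=%s members=%d",
		g.GroupId, g.State, g.Protocol, len(g.Members))
}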


@ -0,0 +1,191 @@
package sarama
type DescribeGroupsResponse struct {
Groups []*GroupDescription
}
func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Groups)); err != nil {
return err
}
for _, groupDescription := range r.Groups {
if err := groupDescription.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Groups = make([]*GroupDescription, n)
for i := 0; i < n; i++ {
r.Groups[i] = new(GroupDescription)
if err := r.Groups[i].decode(pd); err != nil {
return err
}
}
return nil
}
func (r *DescribeGroupsResponse) key() int16 {
return 15
}
func (r *DescribeGroupsResponse) version() int16 {
return 0
}
func (r *DescribeGroupsResponse) headerVersion() int16 {
return 0
}
func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}
type GroupDescription struct {
Err KError
GroupId string
State string
ProtocolType string
Protocol string
Members map[string]*GroupMemberDescription
}
func (gd *GroupDescription) encode(pe packetEncoder) error {
pe.putInt16(int16(gd.Err))
if err := pe.putString(gd.GroupId); err != nil {
return err
}
if err := pe.putString(gd.State); err != nil {
return err
}
if err := pe.putString(gd.ProtocolType); err != nil {
return err
}
if err := pe.putString(gd.Protocol); err != nil {
return err
}
if err := pe.putArrayLength(len(gd.Members)); err != nil {
return err
}
for memberId, groupMemberDescription := range gd.Members {
if err := pe.putString(memberId); err != nil {
return err
}
if err := groupMemberDescription.encode(pe); err != nil {
return err
}
}
return nil
}
func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
gd.Err = KError(kerr)
if gd.GroupId, err = pd.getString(); err != nil {
return
}
if gd.State, err = pd.getString(); err != nil {
return
}
if gd.ProtocolType, err = pd.getString(); err != nil {
return
}
if gd.Protocol, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
gd.Members = make(map[string]*GroupMemberDescription)
for i := 0; i < n; i++ {
memberId, err := pd.getString()
if err != nil {
return err
}
gd.Members[memberId] = new(GroupMemberDescription)
if err := gd.Members[memberId].decode(pd); err != nil {
return err
}
}
return nil
}
type GroupMemberDescription struct {
ClientId string
ClientHost string
MemberMetadata []byte
MemberAssignment []byte
}
func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
if err := pe.putString(gmd.ClientId); err != nil {
return err
}
if err := pe.putString(gmd.ClientHost); err != nil {
return err
}
if err := pe.putBytes(gmd.MemberMetadata); err != nil {
return err
}
if err := pe.putBytes(gmd.MemberAssignment); err != nil {
return err
}
return nil
}
func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
if gmd.ClientId, err = pd.getString(); err != nil {
return
}
if gmd.ClientHost, err = pd.getString(); err != nil {
return
}
if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
return
}
if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
return
}
return nil
}
func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
assignment := new(ConsumerGroupMemberAssignment)
err := decode(gmd.MemberAssignment, assignment)
return assignment, err
}
func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
metadata := new(ConsumerGroupMemberMetadata)
err := decode(gmd.MemberMetadata, metadata)
return metadata, err
}
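
A sketch of how the per-member helpers above fit together; this function could slot into the previous program (resp comes from Broker.DescribeGroups, with fmt and log imported there):

// printAssignments walks a DescribeGroupsResponse and prints each member's
// decoded topic/partition assignment.
func printAssignments(resp *sarama.DescribeGroupsResponse) {
	for _, g := range resp.Groups {
		for memberID, m := range g.Members {
			assignment, err := m.GetMemberAssignment()
			if err != nil {
				log.Printf("member %s: %v", memberID, err)
				continue
			}
			// assignment.Topics maps topic name -> assigned partition IDs.
			for topic, partitions := range assignment.Topics {
				fmt.Printf("%s (%s): %s %v\n", memberID, m.ClientHost, topic, partitions)
			}
		}
	}
}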

vendor/github.com/Shopify/sarama/describe_log_dirs_request.go generated vendored Normal file

@ -0,0 +1,87 @@
package sarama
// DescribeLogDirsRequest is a describe request to get partitions' log size
type DescribeLogDirsRequest struct {
// Version 0 and 1 are equal
// The version number is bumped to indicate that on quota violation brokers send out responses before throttling.
Version int16
// If this is an empty array, all topics will be queried
DescribeTopics []DescribeLogDirsRequestTopic
}
// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic
type DescribeLogDirsRequestTopic struct {
Topic string
PartitionIDs []int32
}
func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error {
length := len(r.DescribeTopics)
if length == 0 {
// In order to query all topics we must send null
length = -1
}
if err := pe.putArrayLength(length); err != nil {
return err
}
for _, d := range r.DescribeTopics {
if err := pe.putString(d.Topic); err != nil {
return err
}
if err := pe.putInt32Array(d.PartitionIDs); err != nil {
return err
}
}
return nil
}
func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error {
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == -1 {
n = 0
}
topics := make([]DescribeLogDirsRequestTopic, n)
for i := 0; i < n; i++ {
topics[i] = DescribeLogDirsRequestTopic{}
topic, err := pd.getString()
if err != nil {
return err
}
topics[i].Topic = topic
pIDs, err := pd.getInt32Array()
if err != nil {
return err
}
topics[i].PartitionIDs = pIDs
}
r.DescribeTopics = topics
return nil
}
func (r *DescribeLogDirsRequest) key() int16 {
return 35
}
func (r *DescribeLogDirsRequest) version() int16 {
return r.Version
}
func (r *DescribeLogDirsRequest) headerVersion() int16 {
return 1
}
func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion {
return V1_0_0_0
}
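
Because an empty DescribeTopics slice is encoded as a null array, the zero-value request asks about every topic. A fragment, assuming an opened *sarama.Broker as in the earlier sketches, cfg.Version >= sarama.V1_0_0_0, and that Broker.DescribeLogDirs is available in this vendored version:

// Zero value: null topic array, i.e. describe all topics in all log dirs.
req := &sarama.DescribeLogDirsRequest{}

// Or scope the query to specific partitions of a (hypothetical) topic:
req.DescribeTopics = []sarama.DescribeLogDirsRequestTopic{
	{Topic: "example-topic", PartitionIDs: []int32{0, 1}},
}

resp, err := broker.DescribeLogDirs(req)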

vendor/github.com/Shopify/sarama/describe_log_dirs_response.go generated vendored Normal file

@ -0,0 +1,229 @@
package sarama
import "time"
type DescribeLogDirsResponse struct {
ThrottleTime time.Duration
// Version 0 and 1 are equal
// The version number is bumped to indicate that on quota violation brokers send out responses before throttling.
Version int16
LogDirs []DescribeLogDirsResponseDirMetadata
}
func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(r.LogDirs)); err != nil {
return err
}
for _, dir := range r.LogDirs {
if err := dir.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) error {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
// Decode array of DescribeLogDirsResponseDirMetadata
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n)
for i := 0; i < n; i++ {
dir := DescribeLogDirsResponseDirMetadata{}
if err := dir.decode(pd, version); err != nil {
return err
}
r.LogDirs[i] = dir
}
return nil
}
func (r *DescribeLogDirsResponse) key() int16 {
return 35
}
func (r *DescribeLogDirsResponse) version() int16 {
return r.Version
}
func (r *DescribeLogDirsResponse) headerVersion() int16 {
return 0
}
func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion {
return V1_0_0_0
}
type DescribeLogDirsResponseDirMetadata struct {
ErrorCode KError
// The absolute log directory path
Path string
Topics []DescribeLogDirsResponseTopic
}
func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error {
pe.putInt16(int16(r.ErrorCode))
if err := pe.putString(r.Path); err != nil {
return err
}
if err := pe.putArrayLength(len(r.Topics)); err != nil {
return err
}
for _, topic := range r.Topics {
if err := topic.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) error {
errCode, err := pd.getInt16()
if err != nil {
return err
}
r.ErrorCode = KError(errCode)
path, err := pd.getString()
if err != nil {
return err
}
r.Path = path
// Decode array of DescribeLogDirsResponseTopic
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Topics = make([]DescribeLogDirsResponseTopic, n)
for i := 0; i < n; i++ {
t := DescribeLogDirsResponseTopic{}
if err := t.decode(pd, version); err != nil {
return err
}
r.Topics[i] = t
}
return nil
}
// DescribeLogDirsResponseTopic contains a topic's partitions descriptions
type DescribeLogDirsResponseTopic struct {
Topic string
Partitions []DescribeLogDirsResponsePartition
}
func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error {
if err := pe.putString(r.Topic); err != nil {
return err
}
if err := pe.putArrayLength(len(r.Partitions)); err != nil {
return err
}
for _, partition := range r.Partitions {
if err := partition.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error {
t, err := pd.getString()
if err != nil {
return err
}
r.Topic = t
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Partitions = make([]DescribeLogDirsResponsePartition, n)
for i := 0; i < n; i++ {
p := DescribeLogDirsResponsePartition{}
if err := p.decode(pd, version); err != nil {
return err
}
r.Partitions[i] = p
}
return nil
}
// DescribeLogDirsResponsePartition describes a partition's log directory
type DescribeLogDirsResponsePartition struct {
PartitionID int32
// The size of the log segments of the partition in bytes.
Size int64
// The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or
// current replica's LEO (if it is the future log for the partition)
OffsetLag int64
// True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of
// the replica in the future.
IsTemporary bool
}
func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder) error {
pe.putInt32(r.PartitionID)
pe.putInt64(r.Size)
pe.putInt64(r.OffsetLag)
pe.putBool(r.IsTemporary)
return nil
}
func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error {
pID, err := pd.getInt32()
if err != nil {
return err
}
r.PartitionID = pID
size, err := pd.getInt64()
if err != nil {
return err
}
r.Size = size
lag, err := pd.getInt64()
if err != nil {
return err
}
r.OffsetLag = lag
isTemp, err := pd.getBool()
if err != nil {
return err
}
r.IsTemporary = isTemp
return nil
}
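
Continuing that sketch, the nested response flattens naturally into per-directory totals using the Size field documented above:

// logDirSizes prints the total on-disk size of each log directory.
// Extends the previous fragment; needs "fmt" imported.
func logDirSizes(resp *sarama.DescribeLogDirsResponse) {
	for _, dir := range resp.LogDirs {
		if dir.ErrorCode != sarama.ErrNoError {
			continue // skip directories the broker failed to describe
		}
		var total int64
		for _, t := range dir.Topics {
			for _, p := range t.Partitions {
				total += p.Size
			}
		}
		fmt.Printf("%s: %d bytes\n", dir.Path, total)
	}
}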

vendor/github.com/Shopify/sarama/dev.yml generated vendored Normal file

@ -0,0 +1,10 @@
name: sarama

up:
  - go:
      version: '1.14'

commands:
  test:
    run: make test
    desc: 'run unit tests'

vendor/github.com/Shopify/sarama/encoder_decoder.go generated vendored Normal file

@ -0,0 +1,94 @@
package sarama
import (
"fmt"
"github.com/rcrowley/go-metrics"
)
// Encoder is the interface that wraps the basic Encode method.
// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
type encoder interface {
encode(pe packetEncoder) error
}
type encoderWithHeader interface {
encoder
headerVersion() int16
}
// Encode takes an Encoder and turns it into bytes while potentially recording metrics.
func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
if e == nil {
return nil, nil
}
var prepEnc prepEncoder
var realEnc realEncoder
err := e.encode(&prepEnc)
if err != nil {
return nil, err
}
if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
}
realEnc.raw = make([]byte, prepEnc.length)
realEnc.registry = metricRegistry
err = e.encode(&realEnc)
if err != nil {
return nil, err
}
return realEnc.raw, nil
}
// Decoder is the interface that wraps the basic Decode method.
// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
type decoder interface {
decode(pd packetDecoder) error
}
type versionedDecoder interface {
decode(pd packetDecoder, version int16) error
}
// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
// interpreted using Kafka's encoding rules.
func decode(buf []byte, in decoder) error {
if buf == nil {
return nil
}
helper := realDecoder{raw: buf}
err := in.decode(&helper)
if err != nil {
return err
}
if helper.off != len(buf) {
return PacketDecodingError{"invalid length"}
}
return nil
}
func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
if buf == nil {
return nil
}
helper := realDecoder{raw: buf}
err := in.decode(&helper, version)
if err != nil {
return err
}
if helper.off != len(buf) {
return PacketDecodingError{"invalid length"}
}
return nil
}
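
encode and versionedDecode are unexported, so a round-trip can only be exercised from inside package sarama, for instance in a test file; a minimal sketch using the DescribeGroupsRequest defined earlier:

package sarama

import (
	"testing"

	"github.com/rcrowley/go-metrics"
)

func TestDescribeGroupsRoundTrip(t *testing.T) {
	req := new(DescribeGroupsRequest)
	req.AddGroup("example-group") // hypothetical group ID

	// First pass (prepEncoder) sizes the buffer, second pass (realEncoder) fills it.
	buf, err := encode(req, metrics.NewRegistry())
	if err != nil {
		t.Fatal(err)
	}

	decoded := new(DescribeGroupsRequest)
	if err := versionedDecode(buf, decoded, req.version()); err != nil {
		t.Fatal(err)
	}
	if len(decoded.Groups) != 1 || decoded.Groups[0] != "example-group" {
		t.Fatalf("unexpected groups: %v", decoded.Groups)
	}
}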

vendor/github.com/Shopify/sarama/end_txn_request.go generated vendored Normal file

@ -0,0 +1,54 @@
package sarama
type EndTxnRequest struct {
TransactionalID string
ProducerID int64
ProducerEpoch int16
TransactionResult bool
}
func (a *EndTxnRequest) encode(pe packetEncoder) error {
if err := pe.putString(a.TransactionalID); err != nil {
return err
}
pe.putInt64(a.ProducerID)
pe.putInt16(a.ProducerEpoch)
pe.putBool(a.TransactionResult)
return nil
}
func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) {
if a.TransactionalID, err = pd.getString(); err != nil {
return err
}
if a.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if a.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
if a.TransactionResult, err = pd.getBool(); err != nil {
return err
}
return nil
}
func (a *EndTxnRequest) key() int16 {
return 26
}
func (a *EndTxnRequest) version() int16 {
return 0
}
func (r *EndTxnRequest) headerVersion() int16 {
return 1
}
func (a *EndTxnRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

vendor/github.com/Shopify/sarama/end_txn_response.go generated vendored Normal file

@ -0,0 +1,48 @@
package sarama
import (
"time"
)
type EndTxnResponse struct {
ThrottleTime time.Duration
Err KError
}
func (e *EndTxnResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(e.ThrottleTime / time.Millisecond))
pe.putInt16(int16(e.Err))
return nil
}
func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
e.Err = KError(kerr)
return nil
}
func (e *EndTxnResponse) key() int16 {
return 25
}
func (e *EndTxnResponse) version() int16 {
return 0
}
func (r *EndTxnResponse) headerVersion() int16 {
return 0
}
func (e *EndTxnResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
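
A hedged fragment showing the request half of this pair; broker is an opened *sarama.Broker as in the earlier sketches, the IDs are placeholders, and cfg.Version must be >= sarama.V0_11_0_0:

req := &sarama.EndTxnRequest{
	TransactionalID:   "example-txn", // hypothetical transactional ID
	ProducerID:        1000,          // placeholder, normally from InitProducerID
	ProducerEpoch:     0,
	TransactionResult: true, // true commits the transaction, false aborts it
}
resp, err := broker.EndTxn(req)
if err == nil && resp.Err != sarama.ErrNoError {
	log.Println("EndTxn failed:", resp.Err)
}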

vendor/github.com/Shopify/sarama/errors.go generated vendored Normal file

@ -0,0 +1,385 @@
package sarama
import (
"errors"
"fmt"
)
// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
// or otherwise failed to respond.
var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
// ErrClosedClient is the error returned when a method is called on a client that has been closed.
var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
// not contain the expected information.
var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
// (meaning one outside of the range [0...numPartitions-1]).
var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
var ErrNotConnected = errors.New("kafka: broker not connected")
// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
// of the message set.
var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
// ErrShuttingDown is returned when a producer receives a message during shutdown.
var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
// a RecordBatch.
var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
// ErrControllerNotAvailable is returned when the server didn't give a correct controller id. The Kafka server's
// version may be lower than 0.10.0.0.
var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
// ErrNoTopicsToUpdateMetadata is returned when Metadata.Full is set to false but no specific topics were found
// to update the metadata.
var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
type PacketEncodingError struct {
Info string
}
func (err PacketEncodingError) Error() string {
return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
}
// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
// This can be a bad CRC or length field, or any other invalid value.
type PacketDecodingError struct {
Info string
}
func (err PacketDecodingError) Error() string {
return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
}
// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
// when the specified configuration is invalid.
type ConfigurationError string
func (err ConfigurationError) Error() string {
return "kafka: invalid configuration (" + string(err) + ")"
}
// KError is the type of error that can be returned directly by the Kafka broker.
// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
type KError int16
// MultiError is used to contain multiple errors.
type MultiError struct {
Errors *[]error
}
func (mErr MultiError) Error() string {
var errString = ""
for _, err := range *mErr.Errors {
errString += err.Error() + ","
}
return errString
}
func (mErr MultiError) PrettyError() string {
var errString = ""
for _, err := range *mErr.Errors {
errString += err.Error() + "\n"
}
return errString
}
// ErrDeleteRecords is the type of error returned when deleting the required records fails
type ErrDeleteRecords struct {
MultiError
}
func (err ErrDeleteRecords) Error() string {
return "kafka server: failed to delete records " + err.MultiError.Error()
}
type ErrReassignPartitions struct {
MultiError
}
func (err ErrReassignPartitions) Error() string {
return fmt.Sprintf("failed to reassign partitions for topic: \n%s", err.MultiError.PrettyError())
}
// Numeric error codes returned by the Kafka server.
const (
ErrNoError KError = 0
ErrUnknown KError = -1
ErrOffsetOutOfRange KError = 1
ErrInvalidMessage KError = 2
ErrUnknownTopicOrPartition KError = 3
ErrInvalidMessageSize KError = 4
ErrLeaderNotAvailable KError = 5
ErrNotLeaderForPartition KError = 6
ErrRequestTimedOut KError = 7
ErrBrokerNotAvailable KError = 8
ErrReplicaNotAvailable KError = 9
ErrMessageSizeTooLarge KError = 10
ErrStaleControllerEpochCode KError = 11
ErrOffsetMetadataTooLarge KError = 12
ErrNetworkException KError = 13
ErrOffsetsLoadInProgress KError = 14
ErrConsumerCoordinatorNotAvailable KError = 15
ErrNotCoordinatorForConsumer KError = 16
ErrInvalidTopic KError = 17
ErrMessageSetSizeTooLarge KError = 18
ErrNotEnoughReplicas KError = 19
ErrNotEnoughReplicasAfterAppend KError = 20
ErrInvalidRequiredAcks KError = 21
ErrIllegalGeneration KError = 22
ErrInconsistentGroupProtocol KError = 23
ErrInvalidGroupId KError = 24
ErrUnknownMemberId KError = 25
ErrInvalidSessionTimeout KError = 26
ErrRebalanceInProgress KError = 27
ErrInvalidCommitOffsetSize KError = 28
ErrTopicAuthorizationFailed KError = 29
ErrGroupAuthorizationFailed KError = 30
ErrClusterAuthorizationFailed KError = 31
ErrInvalidTimestamp KError = 32
ErrUnsupportedSASLMechanism KError = 33
ErrIllegalSASLState KError = 34
ErrUnsupportedVersion KError = 35
ErrTopicAlreadyExists KError = 36
ErrInvalidPartitions KError = 37
ErrInvalidReplicationFactor KError = 38
ErrInvalidReplicaAssignment KError = 39
ErrInvalidConfig KError = 40
ErrNotController KError = 41
ErrInvalidRequest KError = 42
ErrUnsupportedForMessageFormat KError = 43
ErrPolicyViolation KError = 44
ErrOutOfOrderSequenceNumber KError = 45
ErrDuplicateSequenceNumber KError = 46
ErrInvalidProducerEpoch KError = 47
ErrInvalidTxnState KError = 48
ErrInvalidProducerIDMapping KError = 49
ErrInvalidTransactionTimeout KError = 50
ErrConcurrentTransactions KError = 51
ErrTransactionCoordinatorFenced KError = 52
ErrTransactionalIDAuthorizationFailed KError = 53
ErrSecurityDisabled KError = 54
ErrOperationNotAttempted KError = 55
ErrKafkaStorageError KError = 56
ErrLogDirNotFound KError = 57
ErrSASLAuthenticationFailed KError = 58
ErrUnknownProducerID KError = 59
ErrReassignmentInProgress KError = 60
ErrDelegationTokenAuthDisabled KError = 61
ErrDelegationTokenNotFound KError = 62
ErrDelegationTokenOwnerMismatch KError = 63
ErrDelegationTokenRequestNotAllowed KError = 64
ErrDelegationTokenAuthorizationFailed KError = 65
ErrDelegationTokenExpired KError = 66
ErrInvalidPrincipalType KError = 67
ErrNonEmptyGroup KError = 68
ErrGroupIDNotFound KError = 69
ErrFetchSessionIDNotFound KError = 70
ErrInvalidFetchSessionEpoch KError = 71
ErrListenerNotFound KError = 72
ErrTopicDeletionDisabled KError = 73
ErrFencedLeaderEpoch KError = 74
ErrUnknownLeaderEpoch KError = 75
ErrUnsupportedCompressionType KError = 76
ErrStaleBrokerEpoch KError = 77
ErrOffsetNotAvailable KError = 78
ErrMemberIdRequired KError = 79
ErrPreferredLeaderNotAvailable KError = 80
ErrGroupMaxSizeReached KError = 81
ErrFencedInstancedId KError = 82
)
func (err KError) Error() string {
// Error messages stolen/adapted from
// https://kafka.apache.org/protocol#protocol_error_codes
switch err {
case ErrNoError:
return "kafka server: Not an error, why are you printing me?"
case ErrUnknown:
return "kafka server: Unexpected (unknown?) server error."
case ErrOffsetOutOfRange:
return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
case ErrInvalidMessage:
return "kafka server: Message contents does not match its CRC."
case ErrUnknownTopicOrPartition:
return "kafka server: Request was for a topic or partition that does not exist on this broker."
case ErrInvalidMessageSize:
return "kafka server: The message has a negative size."
case ErrLeaderNotAvailable:
return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
case ErrNotLeaderForPartition:
return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
case ErrRequestTimedOut:
return "kafka server: Request exceeded the user-specified time limit in the request."
case ErrBrokerNotAvailable:
return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
case ErrReplicaNotAvailable:
return "kafka server: Replica information not available, one or more brokers are down."
case ErrMessageSizeTooLarge:
return "kafka server: Message was too large, server rejected it to avoid allocation error."
case ErrStaleControllerEpochCode:
return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
case ErrOffsetMetadataTooLarge:
return "kafka server: Specified a string larger than the configured maximum for offset metadata."
case ErrNetworkException:
return "kafka server: The server disconnected before a response was received."
case ErrOffsetsLoadInProgress:
return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
case ErrConsumerCoordinatorNotAvailable:
return "kafka server: Offset's topic has not yet been created."
case ErrNotCoordinatorForConsumer:
return "kafka server: Request was for a consumer group that is not coordinated by this broker."
case ErrInvalidTopic:
return "kafka server: The request attempted to perform an operation on an invalid topic."
case ErrMessageSetSizeTooLarge:
return "kafka server: The request included message batch larger than the configured segment size on the server."
case ErrNotEnoughReplicas:
return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
case ErrNotEnoughReplicasAfterAppend:
return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
case ErrInvalidRequiredAcks:
return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
case ErrIllegalGeneration:
return "kafka server: The provided generation id is not the current generation."
case ErrInconsistentGroupProtocol:
return "kafka server: The provider group protocol type is incompatible with the other members."
case ErrInvalidGroupId:
return "kafka server: The provided group id was empty."
case ErrUnknownMemberId:
return "kafka server: The provided member is not known in the current generation."
case ErrInvalidSessionTimeout:
return "kafka server: The provided session timeout is outside the allowed range."
case ErrRebalanceInProgress:
return "kafka server: A rebalance for the group is in progress. Please re-join the group."
case ErrInvalidCommitOffsetSize:
return "kafka server: The provided commit metadata was too large."
case ErrTopicAuthorizationFailed:
return "kafka server: The client is not authorized to access this topic."
case ErrGroupAuthorizationFailed:
return "kafka server: The client is not authorized to access this group."
case ErrClusterAuthorizationFailed:
return "kafka server: The client is not authorized to send this request type."
case ErrInvalidTimestamp:
return "kafka server: The timestamp of the message is out of acceptable range."
case ErrUnsupportedSASLMechanism:
return "kafka server: The broker does not support the requested SASL mechanism."
case ErrIllegalSASLState:
return "kafka server: Request is not valid given the current SASL state."
case ErrUnsupportedVersion:
return "kafka server: The version of API is not supported."
case ErrTopicAlreadyExists:
return "kafka server: Topic with this name already exists."
case ErrInvalidPartitions:
return "kafka server: Number of partitions is invalid."
case ErrInvalidReplicationFactor:
return "kafka server: Replication-factor is invalid."
case ErrInvalidReplicaAssignment:
return "kafka server: Replica assignment is invalid."
case ErrInvalidConfig:
return "kafka server: Configuration is invalid."
case ErrNotController:
return "kafka server: This is not the correct controller for this cluster."
case ErrInvalidRequest:
return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
case ErrUnsupportedForMessageFormat:
return "kafka server: The requested operation is not supported by the message format version."
case ErrPolicyViolation:
return "kafka server: Request parameters do not satisfy the configured policy."
case ErrOutOfOrderSequenceNumber:
return "kafka server: The broker received an out of order sequence number."
case ErrDuplicateSequenceNumber:
return "kafka server: The broker received a duplicate sequence number."
case ErrInvalidProducerEpoch:
return "kafka server: Producer attempted an operation with an old epoch."
case ErrInvalidTxnState:
return "kafka server: The producer attempted a transactional operation in an invalid state."
case ErrInvalidProducerIDMapping:
return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."
case ErrInvalidTransactionTimeout:
return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."
case ErrConcurrentTransactions:
return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."
case ErrTransactionCoordinatorFenced:
return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."
case ErrTransactionalIDAuthorizationFailed:
return "kafka server: Transactional ID authorization failed."
case ErrSecurityDisabled:
return "kafka server: Security features are disabled."
case ErrOperationNotAttempted:
return "kafka server: The broker did not attempt to execute this operation."
case ErrKafkaStorageError:
return "kafka server: Disk error when trying to access log file on the disk."
case ErrLogDirNotFound:
return "kafka server: The specified log directory is not found in the broker config."
case ErrSASLAuthenticationFailed:
return "kafka server: SASL Authentication failed."
case ErrUnknownProducerID:
return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
case ErrReassignmentInProgress:
return "kafka server: A partition reassignment is in progress."
case ErrDelegationTokenAuthDisabled:
return "kafka server: Delegation Token feature is not enabled."
case ErrDelegationTokenNotFound:
return "kafka server: Delegation Token is not found on server."
case ErrDelegationTokenOwnerMismatch:
return "kafka server: Specified Principal is not valid Owner/Renewer."
case ErrDelegationTokenRequestNotAllowed:
return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels."
case ErrDelegationTokenAuthorizationFailed:
return "kafka server: Delegation Token authorization failed."
case ErrDelegationTokenExpired:
return "kafka server: Delegation Token is expired."
case ErrInvalidPrincipalType:
return "kafka server: Supplied principalType is not supported."
case ErrNonEmptyGroup:
return "kafka server: The group is not empty."
case ErrGroupIDNotFound:
return "kafka server: The group id does not exist."
case ErrFetchSessionIDNotFound:
return "kafka server: The fetch session ID was not found."
case ErrInvalidFetchSessionEpoch:
return "kafka server: The fetch session epoch is invalid."
case ErrListenerNotFound:
return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed."
case ErrTopicDeletionDisabled:
return "kafka server: Topic deletion is disabled."
case ErrFencedLeaderEpoch:
return "kafka server: The leader epoch in the request is older than the epoch on the broker."
case ErrUnknownLeaderEpoch:
return "kafka server: The leader epoch in the request is newer than the epoch on the broker."
case ErrUnsupportedCompressionType:
return "kafka server: The requesting client does not support the compression type of given partition."
case ErrStaleBrokerEpoch:
return "kafka server: Broker epoch has changed"
case ErrOffsetNotAvailable:
return "kafka server: The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing"
case ErrMemberIdRequired:
return "kafka server: The group member needs to have a valid member id before actually entering a consumer group"
case ErrPreferredLeaderNotAvailable:
return "kafka server: The preferred leader was not available"
case ErrGroupMaxSizeReached:
return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members."
case ErrFencedInstancedId:
return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id."
}
return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
}
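
KError values are plain int16 codes, so errors carried in responses compare directly against the constants above; a small self-contained sketch:

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// In practice this value would come from a response block's Err field.
	var kerr sarama.KError = sarama.ErrNotLeaderForPartition
	if kerr != sarama.ErrNoError {
		fmt.Println("broker returned:", kerr) // prints the message from KError.Error()
	}
}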

vendor/github.com/Shopify/sarama/fetch_request.go generated vendored Normal file

@ -0,0 +1,295 @@
package sarama
type fetchRequestBlock struct {
Version int16
currentLeaderEpoch int32
fetchOffset int64
logStartOffset int64
maxBytes int32
}
func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error {
b.Version = version
if b.Version >= 9 {
pe.putInt32(b.currentLeaderEpoch)
}
pe.putInt64(b.fetchOffset)
if b.Version >= 5 {
pe.putInt64(b.logStartOffset)
}
pe.putInt32(b.maxBytes)
return nil
}
func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) {
b.Version = version
if b.Version >= 9 {
if b.currentLeaderEpoch, err = pd.getInt32(); err != nil {
return err
}
}
if b.fetchOffset, err = pd.getInt64(); err != nil {
return err
}
if b.Version >= 5 {
if b.logStartOffset, err = pd.getInt64(); err != nil {
return err
}
}
if b.maxBytes, err = pd.getInt32(); err != nil {
return err
}
return nil
}
// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
type FetchRequest struct {
MaxWaitTime int32
MinBytes int32
MaxBytes int32
Version int16
Isolation IsolationLevel
SessionID int32
SessionEpoch int32
blocks map[string]map[int32]*fetchRequestBlock
forgotten map[string][]int32
RackID string
}
type IsolationLevel int8
const (
ReadUncommitted IsolationLevel = iota
ReadCommitted
)
func (r *FetchRequest) encode(pe packetEncoder) (err error) {
pe.putInt32(-1) // replica ID is always -1 for clients
pe.putInt32(r.MaxWaitTime)
pe.putInt32(r.MinBytes)
if r.Version >= 3 {
pe.putInt32(r.MaxBytes)
}
if r.Version >= 4 {
pe.putInt8(int8(r.Isolation))
}
if r.Version >= 7 {
pe.putInt32(r.SessionID)
pe.putInt32(r.SessionEpoch)
}
err = pe.putArrayLength(len(r.blocks))
if err != nil {
return err
}
for topic, blocks := range r.blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(blocks))
if err != nil {
return err
}
for partition, block := range blocks {
pe.putInt32(partition)
err = block.encode(pe, r.Version)
if err != nil {
return err
}
}
}
if r.Version >= 7 {
err = pe.putArrayLength(len(r.forgotten))
if err != nil {
return err
}
for topic, partitions := range r.forgotten {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for _, partition := range partitions {
pe.putInt32(partition)
}
}
}
if r.Version >= 11 {
err = pe.putString(r.RackID)
if err != nil {
return err
}
}
return nil
}
func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if _, err = pd.getInt32(); err != nil {
return err
}
if r.MaxWaitTime, err = pd.getInt32(); err != nil {
return err
}
if r.MinBytes, err = pd.getInt32(); err != nil {
return err
}
if r.Version >= 3 {
if r.MaxBytes, err = pd.getInt32(); err != nil {
return err
}
}
if r.Version >= 4 {
isolation, err := pd.getInt8()
if err != nil {
return err
}
r.Isolation = IsolationLevel(isolation)
}
if r.Version >= 7 {
r.SessionID, err = pd.getInt32()
if err != nil {
return err
}
r.SessionEpoch, err = pd.getInt32()
if err != nil {
return err
}
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
r.blocks = make(map[string]map[int32]*fetchRequestBlock)
for i := 0; i < topicCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make(map[int32]*fetchRequestBlock)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
fetchBlock := &fetchRequestBlock{}
if err = fetchBlock.decode(pd, r.Version); err != nil {
return err
}
r.blocks[topic][partition] = fetchBlock
}
}
if r.Version >= 7 {
forgottenCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.forgotten = make(map[string][]int32)
for i := 0; i < forgottenCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.forgotten[topic] = make([]int32, partitionCount)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
r.forgotten[topic][j] = partition
}
}
}
if r.Version >= 11 {
r.RackID, err = pd.getString()
if err != nil {
return err
}
}
return nil
}
func (r *FetchRequest) key() int16 {
return 1
}
func (r *FetchRequest) version() int16 {
return r.Version
}
func (r *FetchRequest) headerVersion() int16 {
return 1
}
func (r *FetchRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 0:
return MinVersion
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
case 3:
return V0_10_1_0
case 4, 5:
return V0_11_0_0
case 6:
return V1_0_0_0
case 7:
return V1_1_0_0
case 8:
return V2_0_0_0
case 9, 10:
return V2_1_0_0
case 11:
return V2_3_0_0
default:
return MaxVersion
}
}
func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
if r.blocks == nil {
r.blocks = make(map[string]map[int32]*fetchRequestBlock)
}
if r.Version >= 7 && r.forgotten == nil {
r.forgotten = make(map[string][]int32)
}
if r.blocks[topic] == nil {
r.blocks[topic] = make(map[int32]*fetchRequestBlock)
}
tmp := new(fetchRequestBlock)
tmp.Version = r.Version
tmp.maxBytes = maxBytes
tmp.fetchOffset = fetchOffset
if r.Version >= 9 {
tmp.currentLeaderEpoch = int32(-1)
}
r.blocks[topic][partitionID] = tmp
}
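
A fragment showing a hand-built fetch; MaxBytes is only encoded for Version >= 3 (the KIP-74 change referenced above), and the topic, offset, and size arguments are placeholders:

req := &sarama.FetchRequest{
	Version:     4,
	MaxWaitTime: 500, // milliseconds
	MinBytes:    1,
	MaxBytes:    1 << 20,              // honoured from version 3 onward
	Isolation:   sarama.ReadCommitted, // encoded from version 4 onward
}
req.AddBlock("example-topic", 0, 0, 32*1024) // partition 0, offset 0, 32 KiB per partition
// resp, err := broker.Fetch(req)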

vendor/github.com/Shopify/sarama/fetch_response.go generated vendored Normal file

@ -0,0 +1,546 @@
package sarama
import (
"sort"
"time"
)
type AbortedTransaction struct {
ProducerID int64
FirstOffset int64
}
func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
if t.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if t.FirstOffset, err = pd.getInt64(); err != nil {
return err
}
return nil
}
func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
pe.putInt64(t.ProducerID)
pe.putInt64(t.FirstOffset)
return nil
}
type FetchResponseBlock struct {
Err KError
HighWaterMarkOffset int64
LastStableOffset int64
LogStartOffset int64
AbortedTransactions []*AbortedTransaction
PreferredReadReplica int32
Records *Records // deprecated: use FetchResponseBlock.RecordsSet
RecordsSet []*Records
Partial bool
}
func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
b.Err = KError(tmp)
b.HighWaterMarkOffset, err = pd.getInt64()
if err != nil {
return err
}
if version >= 4 {
b.LastStableOffset, err = pd.getInt64()
if err != nil {
return err
}
if version >= 5 {
b.LogStartOffset, err = pd.getInt64()
if err != nil {
return err
}
}
numTransact, err := pd.getArrayLength()
if err != nil {
return err
}
if numTransact >= 0 {
b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
}
for i := 0; i < numTransact; i++ {
transact := new(AbortedTransaction)
if err = transact.decode(pd); err != nil {
return err
}
b.AbortedTransactions[i] = transact
}
}
if version >= 11 {
b.PreferredReadReplica, err = pd.getInt32()
if err != nil {
return err
}
}
recordsSize, err := pd.getInt32()
if err != nil {
return err
}
recordsDecoder, err := pd.getSubset(int(recordsSize))
if err != nil {
return err
}
b.RecordsSet = []*Records{}
for recordsDecoder.remaining() > 0 {
records := &Records{}
if err := records.decode(recordsDecoder); err != nil {
// If we have already decoded at least one record set, truncated data is not an error
if err == ErrInsufficientData {
if len(b.RecordsSet) == 0 {
b.Partial = true
}
break
}
return err
}
partial, err := records.isPartial()
if err != nil {
return err
}
n, err := records.numRecords()
if err != nil {
return err
}
if n > 0 || (partial && len(b.RecordsSet) == 0) {
b.RecordsSet = append(b.RecordsSet, records)
if b.Records == nil {
b.Records = records
}
}
overflow, err := records.isOverflow()
if err != nil {
return err
}
if partial || overflow {
break
}
}
return nil
}
func (b *FetchResponseBlock) numRecords() (int, error) {
sum := 0
for _, records := range b.RecordsSet {
count, err := records.numRecords()
if err != nil {
return 0, err
}
sum += count
}
return sum, nil
}
func (b *FetchResponseBlock) isPartial() (bool, error) {
if b.Partial {
return true, nil
}
if len(b.RecordsSet) == 1 {
return b.RecordsSet[0].isPartial()
}
return false, nil
}
func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(b.Err))
pe.putInt64(b.HighWaterMarkOffset)
if version >= 4 {
pe.putInt64(b.LastStableOffset)
if version >= 5 {
pe.putInt64(b.LogStartOffset)
}
if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
return err
}
for _, transact := range b.AbortedTransactions {
if err = transact.encode(pe); err != nil {
return err
}
}
}
if version >= 11 {
pe.putInt32(b.PreferredReadReplica)
}
pe.push(&lengthField{})
for _, records := range b.RecordsSet {
err = records.encode(pe)
if err != nil {
return err
}
}
return pe.pop()
}
func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction {
// I can't find any doc that guarantees the field `fetchResponse.AbortedTransactions` is ordered,
// plus the Java implementation uses a PriorityQueue based on `FirstOffset`, so we have to order it ourselves
at := b.AbortedTransactions
sort.Slice(
at,
func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset },
)
return at
}
type FetchResponse struct {
Blocks map[string]map[int32]*FetchResponseBlock
ThrottleTime time.Duration
ErrorCode int16
SessionID int32
Version int16
LogAppendTime bool
Timestamp time.Time
}
func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.Version >= 1 {
throttle, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(throttle) * time.Millisecond
}
if r.Version >= 7 {
r.ErrorCode, err = pd.getInt16()
if err != nil {
return err
}
r.SessionID, err = pd.getInt32()
if err != nil {
return err
}
}
numTopics, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(FetchResponseBlock)
err = block.decode(pd, version)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
return nil
}
func (r *FetchResponse) encode(pe packetEncoder) (err error) {
if r.Version >= 1 {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
}
if r.Version >= 7 {
pe.putInt16(r.ErrorCode)
pe.putInt32(r.SessionID)
}
err = pe.putArrayLength(len(r.Blocks))
if err != nil {
return err
}
for topic, partitions := range r.Blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for id, block := range partitions {
pe.putInt32(id)
err = block.encode(pe, r.Version)
if err != nil {
return err
}
}
}
return nil
}
func (r *FetchResponse) key() int16 {
return 1
}
func (r *FetchResponse) version() int16 {
return r.Version
}
func (r *FetchResponse) headerVersion() int16 {
return 0
}
func (r *FetchResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 0:
return MinVersion
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
case 3:
return V0_10_1_0
case 4, 5:
return V0_11_0_0
case 6:
return V1_0_0_0
case 7:
return V1_1_0_0
case 8:
return V2_0_0_0
case 9, 10:
return V2_1_0_0
case 11:
return V2_3_0_0
default:
return MaxVersion
}
}
func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
if r.Blocks == nil {
return nil
}
if r.Blocks[topic] == nil {
return nil
}
return r.Blocks[topic][partition]
}
func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
}
partitions, ok := r.Blocks[topic]
if !ok {
partitions = make(map[int32]*FetchResponseBlock)
r.Blocks[topic] = partitions
}
frb, ok := partitions[partition]
if !ok {
frb = new(FetchResponseBlock)
partitions[partition] = frb
}
frb.Err = err
}
func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
}
partitions, ok := r.Blocks[topic]
if !ok {
partitions = make(map[int32]*FetchResponseBlock)
r.Blocks[topic] = partitions
}
frb, ok := partitions[partition]
if !ok {
frb = new(FetchResponseBlock)
partitions[partition] = frb
}
return frb
}
func encodeKV(key, value Encoder) ([]byte, []byte) {
var kb []byte
var vb []byte
if key != nil {
kb, _ = key.Encode()
}
if value != nil {
vb, _ = value.Encode()
}
return kb, vb
}
func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) {
frb := r.getOrCreateBlock(topic, partition)
kb, vb := encodeKV(key, value)
if r.LogAppendTime {
timestamp = r.Timestamp
}
msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version}
msgBlock := &MessageBlock{Msg: msg, Offset: offset}
if len(frb.RecordsSet) == 0 {
records := newLegacyRecords(&MessageSet{})
frb.RecordsSet = []*Records{&records}
}
set := frb.RecordsSet[0].MsgSet
set.Messages = append(set.Messages, msgBlock)
}
func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) {
frb := r.getOrCreateBlock(topic, partition)
kb, vb := encodeKV(key, value)
if len(frb.RecordsSet) == 0 {
records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
frb.RecordsSet = []*Records{&records}
}
batch := frb.RecordsSet[0].RecordBatch
rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
batch.addRecord(rec)
}
// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp, but instead of appending one record to an
// existing batch, it appends a new batch containing one record to the fetchResponse.
// Since transactions are handled at the batch level (the whole batch is either committed or aborted), use this to test transactions
func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) {
frb := r.getOrCreateBlock(topic, partition)
kb, vb := encodeKV(key, value)
records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
batch := &RecordBatch{
Version: 2,
LogAppendTime: r.LogAppendTime,
FirstTimestamp: timestamp,
MaxTimestamp: r.Timestamp,
FirstOffset: offset,
LastOffsetDelta: 0,
ProducerID: producerID,
IsTransactional: isTransactional,
}
rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
batch.addRecord(rec)
records.RecordBatch = batch
frb.RecordsSet = append(frb.RecordsSet, &records)
}
func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) {
frb := r.getOrCreateBlock(topic, partition)
// batch
batch := &RecordBatch{
Version: 2,
LogAppendTime: r.LogAppendTime,
FirstTimestamp: timestamp,
MaxTimestamp: r.Timestamp,
FirstOffset: offset,
LastOffsetDelta: 0,
ProducerID: producerID,
IsTransactional: true,
Control: true,
}
// records
records := newDefaultRecords(nil)
records.RecordBatch = batch
// record
crAbort := ControlRecord{
Version: 0,
Type: recordType,
}
crKey := &realEncoder{raw: make([]byte, 4)}
crValue := &realEncoder{raw: make([]byte, 6)}
crAbort.encode(crKey, crValue)
rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
batch.addRecord(rec)
frb.RecordsSet = append(frb.RecordsSet, &records)
}
func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0)
}
func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{})
}
func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) {
r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{})
}
func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) {
// define controlRecord key and value
r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{})
}
func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
frb := r.getOrCreateBlock(topic, partition)
if len(frb.RecordsSet) == 0 {
records := newDefaultRecords(&RecordBatch{Version: 2})
frb.RecordsSet = []*Records{&records}
}
batch := frb.RecordsSet[0].RecordBatch
batch.LastOffsetDelta = offset
}
func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
frb := r.getOrCreateBlock(topic, partition)
frb.LastStableOffset = offset
}
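
The Add* helpers above exist mainly to fabricate broker responses in tests; a fragment (fmt assumed imported):

resp := new(sarama.FetchResponse)
resp.AddMessage("example-topic", 0, nil, sarama.StringEncoder("hello"), 0)
resp.AddError("example-topic", 1, sarama.ErrOffsetOutOfRange)

if block := resp.GetBlock("example-topic", 0); block != nil {
	fmt.Println("record sets:", len(block.RecordsSet))
}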

vendor/github.com/Shopify/sarama/find_coordinator_request.go generated vendored Normal file

@ -0,0 +1,65 @@
package sarama
type CoordinatorType int8
const (
CoordinatorGroup CoordinatorType = iota
CoordinatorTransaction
)
type FindCoordinatorRequest struct {
Version int16
CoordinatorKey string
CoordinatorType CoordinatorType
}
func (f *FindCoordinatorRequest) encode(pe packetEncoder) error {
if err := pe.putString(f.CoordinatorKey); err != nil {
return err
}
if f.Version >= 1 {
pe.putInt8(int8(f.CoordinatorType))
}
return nil
}
func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) {
if f.CoordinatorKey, err = pd.getString(); err != nil {
return err
}
if version >= 1 {
f.Version = version
coordinatorType, err := pd.getInt8()
if err != nil {
return err
}
f.CoordinatorType = CoordinatorType(coordinatorType)
}
return nil
}
func (f *FindCoordinatorRequest) key() int16 {
return 10
}
func (f *FindCoordinatorRequest) version() int16 {
return f.Version
}
func (r *FindCoordinatorRequest) headerVersion() int16 {
return 1
}
func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
switch f.Version {
case 1:
return V0_11_0_0
default:
return V0_8_2_0
}
}

vendor/github.com/Shopify/sarama/find_coordinator_response.go generated vendored Normal file

@ -0,0 +1,96 @@
package sarama
import (
"time"
)
var NoNode = &Broker{id: -1, addr: ":-1"}
type FindCoordinatorResponse struct {
Version int16
ThrottleTime time.Duration
Err KError
ErrMsg *string
Coordinator *Broker
}
func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) {
if version >= 1 {
f.Version = version
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
}
tmp, err := pd.getInt16()
if err != nil {
return err
}
f.Err = KError(tmp)
if version >= 1 {
if f.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
}
coordinator := new(Broker)
// The version is hardcoded to 0, as version 1 of the Broker decode
// contains the rack field, which is not present in the FindCoordinatorResponse.
if err := coordinator.decode(pd, 0); err != nil {
return err
}
if coordinator.addr == ":0" {
return nil
}
f.Coordinator = coordinator
return nil
}
func (f *FindCoordinatorResponse) encode(pe packetEncoder) error {
if f.Version >= 1 {
pe.putInt32(int32(f.ThrottleTime / time.Millisecond))
}
pe.putInt16(int16(f.Err))
if f.Version >= 1 {
if err := pe.putNullableString(f.ErrMsg); err != nil {
return err
}
}
coordinator := f.Coordinator
if coordinator == nil {
coordinator = NoNode
}
if err := coordinator.encode(pe, 0); err != nil {
return err
}
return nil
}
func (f *FindCoordinatorResponse) key() int16 {
return 10
}
func (f *FindCoordinatorResponse) version() int16 {
return f.Version
}
func (r *FindCoordinatorResponse) headerVersion() int16 {
return 0
}
func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
switch f.Version {
case 1:
return V0_11_0_0
default:
return V0_8_2_0
}
}
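
A fragment locating a coordinator; version 1 adds the CoordinatorType field, so the same request can also resolve a transaction coordinator (broker is an opened *sarama.Broker; the key is a placeholder):

req := &sarama.FindCoordinatorRequest{
	Version:         1,               // requires cfg.Version >= sarama.V0_11_0_0
	CoordinatorKey:  "example-group", // group ID, or a transactional ID for CoordinatorTransaction
	CoordinatorType: sarama.CoordinatorGroup,
}
resp, err := broker.FindCoordinator(req)
if err == nil && resp.Err == sarama.ErrNoError && resp.Coordinator != nil {
	fmt.Println("coordinator:", resp.Coordinator.Addr())
}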

vendor/github.com/Shopify/sarama/go.mod generated vendored Normal file

@ -0,0 +1,34 @@
module github.com/Shopify/sarama
go 1.13
require (
github.com/Shopify/toxiproxy v2.1.4+incompatible
github.com/davecgh/go-spew v1.1.1
github.com/eapache/go-resiliency v1.2.0
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21
github.com/eapache/queue v1.1.0
github.com/fortytw2/leaktest v1.3.0
github.com/frankban/quicktest v1.7.2 // indirect
github.com/golang/snappy v0.0.1 // indirect
github.com/google/go-cmp v0.4.0 // indirect
github.com/hashicorp/go-uuid v1.0.2 // indirect
github.com/jcmturner/gofork v1.0.0 // indirect
github.com/klauspost/compress v1.9.8
github.com/kr/pretty v0.2.0 // indirect
github.com/pierrec/lz4 v2.4.1+incompatible
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563
github.com/stretchr/testify v1.4.0
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c
github.com/xdg/stringprep v1.0.0 // indirect
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 // indirect
golang.org/x/net v0.0.0-20200202094626-16171245cfb2
golang.org/x/text v0.3.2 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect
gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
gopkg.in/jcmturner/gokrb5.v7 v7.5.0
gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect
gopkg.in/yaml.v2 v2.2.8 // indirect
)

vendor/github.com/Shopify/sarama/go.sum generated vendored Normal file

@ -0,0 +1,81 @@
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk=
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA=
github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/pierrec/lz4 v2.4.1+incompatible h1:mFe7ttWaflA46Mhqh+jUfjp2qTbPYxLB2/OyBppH9dg=
github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w=
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg=
gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

vendor/github.com/Shopify/sarama/gssapi_kerberos.go generated vendored Normal file
@@ -0,0 +1,257 @@
package sarama
import (
"encoding/asn1"
"encoding/binary"
"fmt"
"io"
"strings"
"time"
"gopkg.in/jcmturner/gokrb5.v7/asn1tools"
"gopkg.in/jcmturner/gokrb5.v7/gssapi"
"gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
"gopkg.in/jcmturner/gokrb5.v7/messages"
"gopkg.in/jcmturner/gokrb5.v7/types"
)
const (
TOK_ID_KRB_AP_REQ = 256
GSS_API_GENERIC_TAG = 0x60
KRB5_USER_AUTH = 1
KRB5_KEYTAB_AUTH = 2
GSS_API_INITIAL = 1
GSS_API_VERIFY = 2
GSS_API_FINISH = 3
)
type GSSAPIConfig struct {
AuthType int
KeyTabPath string
KerberosConfigPath string
ServiceName string
Username string
Password string
Realm string
}
type GSSAPIKerberosAuth struct {
Config *GSSAPIConfig
ticket messages.Ticket
encKey types.EncryptionKey
NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error)
step int
}
type KerberosClient interface {
Login() error
GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error)
Domain() string
CName() types.PrincipalName
Destroy()
}
/*
*
* Appends the length in big endian before the payload, and sends it to Kafka
*
*/
func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) {
length := len(payload)
finalPackage := make([]byte, length+4) //4 byte length header + payload
copy(finalPackage[4:], payload)
binary.BigEndian.PutUint32(finalPackage, uint32(length))
bytes, err := broker.conn.Write(finalPackage)
if err != nil {
return bytes, err
}
return bytes, nil
}
/*
*
* Read length (4 bytes) and then read the payload
*
*/
func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) {
bytesRead := 0
lengthInBytes := make([]byte, 4)
bytes, err := io.ReadFull(broker.conn, lengthInBytes)
if err != nil {
return nil, bytesRead, err
}
bytesRead += bytes
payloadLength := binary.BigEndian.Uint32(lengthInBytes)
payloadBytes := make([]byte, payloadLength) // buffer for the payload
bytes, err = io.ReadFull(broker.conn, payloadBytes) // read the payload
if err != nil {
return payloadBytes, bytesRead, err
}
bytesRead += bytes
return payloadBytes, bytesRead, nil
}
func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte {
a := make([]byte, 24)
flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf}
binary.LittleEndian.PutUint32(a[:4], 16)
for _, i := range flags {
f := binary.LittleEndian.Uint32(a[20:24])
f |= uint32(i)
binary.LittleEndian.PutUint32(a[20:24], f)
}
return a
}
/*
*
* Constructs a Kerberos AP_REQ package, conforming to RFC-4120
* https://tools.ietf.org/html/rfc4120#page-84
*
*/
func (krbAuth *GSSAPIKerberosAuth) createKrb5Token(
domain string, cname types.PrincipalName,
ticket messages.Ticket,
sessionKey types.EncryptionKey) ([]byte, error) {
auth, err := types.NewAuthenticator(domain, cname)
if err != nil {
return nil, err
}
auth.Cksum = types.Checksum{
CksumType: chksumtype.GSSAPI,
Checksum: krbAuth.newAuthenticatorChecksum(),
}
APReq, err := messages.NewAPReq(
ticket,
sessionKey,
auth,
)
if err != nil {
return nil, err
}
aprBytes := make([]byte, 2)
binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ)
tb, err := APReq.Marshal()
if err != nil {
return nil, err
}
aprBytes = append(aprBytes, tb...)
return aprBytes, nil
}
/*
*
* Append the GSS-API header to the payload, conforming to RFC-2743
* Section 3.1, Mechanism-Independent Token Format
*
* https://tools.ietf.org/html/rfc2743#page-81
*
* GSSAPIHeader + <specific mechanism payload>
*
*/
func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) {
oidBytes, err := asn1.Marshal(gssapi.OID(gssapi.OIDKRB5))
if err != nil {
return nil, err
}
tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload))
GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...)
GSSHeader = append(GSSHeader, oidBytes...)
GSSPackage := append(GSSHeader, payload...)
return GSSPackage, nil
}
func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient KerberosClient) ([]byte, error) {
switch krbAuth.step {
case GSS_API_INITIAL:
aprBytes, err := krbAuth.createKrb5Token(
kerberosClient.Domain(),
kerberosClient.CName(),
krbAuth.ticket,
krbAuth.encKey)
if err != nil {
return nil, err
}
krbAuth.step = GSS_API_VERIFY
return krbAuth.appendGSSAPIHeader(aprBytes)
case GSS_API_VERIFY:
wrapTokenReq := gssapi.WrapToken{}
if err := wrapTokenReq.Unmarshal(bytes, true); err != nil {
return nil, err
}
// Validate response.
isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL)
if !isValid {
return nil, err
}
wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey)
if err != nil {
return nil, err
}
krbAuth.step = GSS_API_FINISH
return wrapTokenResponse.Marshal()
}
return nil, nil
}
/* This does the handshake for authorization */
func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error {
kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
if err != nil {
Logger.Printf("Kerberos client error: %s", err)
return err
}
err = kerberosClient.Login()
if err != nil {
Logger.Printf("Kerberos client error: %s", err)
return err
}
// Construct SPN using serviceName and host
// SPN format: <SERVICE>/<FQDN>
host := strings.SplitN(broker.addr, ":", 2)[0] // Strip port part
spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host)
ticket, encKey, err := kerberosClient.GetServiceTicket(spn)
if err != nil {
Logger.Printf("Error getting Kerberos service ticket : %s", err)
return err
}
krbAuth.ticket = ticket
krbAuth.encKey = encKey
krbAuth.step = GSS_API_INITIAL
var receivedBytes []byte
defer kerberosClient.Destroy()
for {
packBytes, err := krbAuth.initSecContext(receivedBytes, kerberosClient)
if err != nil {
Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
return err
}
requestTime := time.Now()
bytesWritten, err := krbAuth.writePackage(broker, packBytes)
if err != nil {
Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
return err
}
broker.updateOutgoingCommunicationMetrics(bytesWritten)
if krbAuth.step == GSS_API_VERIFY {
bytesRead := 0
receivedBytes, bytesRead, err = krbAuth.readPackage(broker)
requestLatency := time.Since(requestTime)
broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
if err != nil {
Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
return err
}
} else if krbAuth.step == GSS_API_FINISH {
return nil
}
}
}
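
For context, the handshake above is normally driven through sarama's SASL configuration rather than invoked directly. A minimal sketch of wiring these GSSAPI fields into a producer config follows; the broker address, keytab path, principal, and realm are placeholder values.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // required by SyncProducer
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI
	cfg.Net.SASL.GSSAPI = sarama.GSSAPIConfig{
		AuthType:           sarama.KRB5_KEYTAB_AUTH, // or KRB5_USER_AUTH with Password set
		KeyTabPath:         "/etc/security/kafka.keytab", // placeholder path
		KerberosConfigPath: "/etc/krb5.conf",
		ServiceName:        "kafka", // SPN becomes kafka/<broker host>, as in Authorize above
		Username:           "kafka-client", // placeholder principal
		Realm:              "EXAMPLE.COM",  // placeholder realm
	}

	producer, err := sarama.NewSyncProducer([]string{"broker1:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()
}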

vendor/github.com/Shopify/sarama/heartbeat_request.go generated vendored Normal file
@@ -0,0 +1,51 @@
package sarama
type HeartbeatRequest struct {
GroupId string
GenerationId int32
MemberId string
}
func (r *HeartbeatRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
pe.putInt32(r.GenerationId)
if err := pe.putString(r.MemberId); err != nil {
return err
}
return nil
}
func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.GenerationId, err = pd.getInt32(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
return nil
}
func (r *HeartbeatRequest) key() int16 {
return 12
}
func (r *HeartbeatRequest) version() int16 {
return 0
}
func (r *HeartbeatRequest) headerVersion() int16 {
return 1
}
func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}

vendor/github.com/Shopify/sarama/heartbeat_response.go generated vendored Normal file
@@ -0,0 +1,36 @@
package sarama
type HeartbeatResponse struct {
Err KError
}
func (r *HeartbeatResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
return nil
}
func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
return nil
}
func (r *HeartbeatResponse) key() int16 {
return 12
}
func (r *HeartbeatResponse) version() int16 {
return 0
}
func (r *HeartbeatResponse) headerVersion() int16 {
return 0
}
func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}

vendor/github.com/Shopify/sarama/init_producer_id_request.go generated vendored Normal file
@@ -0,0 +1,47 @@
package sarama
import "time"
type InitProducerIDRequest struct {
TransactionalID *string
TransactionTimeout time.Duration
}
func (i *InitProducerIDRequest) encode(pe packetEncoder) error {
if err := pe.putNullableString(i.TransactionalID); err != nil {
return err
}
pe.putInt32(int32(i.TransactionTimeout / time.Millisecond))
return nil
}
func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) {
if i.TransactionalID, err = pd.getNullableString(); err != nil {
return err
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
i.TransactionTimeout = time.Duration(timeout) * time.Millisecond
return nil
}
func (i *InitProducerIDRequest) key() int16 {
return 22
}
func (i *InitProducerIDRequest) version() int16 {
return 0
}
func (i *InitProducerIDRequest) headerVersion() int16 {
return 1
}
func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}
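
The transaction timeout travels on the wire as an int32 millisecond count; a standalone sketch of the round trip performed by encode and decode above:

package main

import (
	"fmt"
	"time"
)

func main() {
	timeout := 30 * time.Second

	// encode: time.Duration -> int32 milliseconds on the wire
	wire := int32(timeout / time.Millisecond)

	// decode: int32 milliseconds -> time.Duration
	back := time.Duration(wire) * time.Millisecond

	fmt.Println(wire, back) // 30000 30s
}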

vendor/github.com/Shopify/sarama/init_producer_id_response.go generated vendored Normal file
@@ -0,0 +1,59 @@
package sarama
import "time"
type InitProducerIDResponse struct {
ThrottleTime time.Duration
Err KError
ProducerID int64
ProducerEpoch int16
}
func (i *InitProducerIDResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(i.ThrottleTime / time.Millisecond))
pe.putInt16(int16(i.Err))
pe.putInt64(i.ProducerID)
pe.putInt16(i.ProducerEpoch)
return nil
}
func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
i.Err = KError(kerr)
if i.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if i.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
return nil
}
func (i *InitProducerIDResponse) key() int16 {
return 22
}
func (i *InitProducerIDResponse) version() int16 {
return 0
}
func (i *InitProducerIDResponse) headerVersion() int16 {
return 0
}
func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

vendor/github.com/Shopify/sarama/join_group_request.go generated vendored Normal file
@@ -0,0 +1,167 @@
package sarama
type GroupProtocol struct {
Name string
Metadata []byte
}
func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
p.Name, err = pd.getString()
if err != nil {
return err
}
p.Metadata, err = pd.getBytes()
return err
}
func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
if err := pe.putString(p.Name); err != nil {
return err
}
if err := pe.putBytes(p.Metadata); err != nil {
return err
}
return nil
}
type JoinGroupRequest struct {
Version int16
GroupId string
SessionTimeout int32
RebalanceTimeout int32
MemberId string
ProtocolType string
GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols
OrderedGroupProtocols []*GroupProtocol
}
func (r *JoinGroupRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
pe.putInt32(r.SessionTimeout)
if r.Version >= 1 {
pe.putInt32(r.RebalanceTimeout)
}
if err := pe.putString(r.MemberId); err != nil {
return err
}
if err := pe.putString(r.ProtocolType); err != nil {
return err
}
if len(r.GroupProtocols) > 0 {
if len(r.OrderedGroupProtocols) > 0 {
return PacketEncodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
}
if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
return err
}
for name, metadata := range r.GroupProtocols {
if err := pe.putString(name); err != nil {
return err
}
if err := pe.putBytes(metadata); err != nil {
return err
}
}
} else {
if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
return err
}
for _, protocol := range r.OrderedGroupProtocols {
if err := protocol.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.SessionTimeout, err = pd.getInt32(); err != nil {
return
}
if version >= 1 {
if r.RebalanceTimeout, err = pd.getInt32(); err != nil {
return err
}
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
if r.ProtocolType, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.GroupProtocols = make(map[string][]byte)
for i := 0; i < n; i++ {
protocol := &GroupProtocol{}
if err := protocol.decode(pd); err != nil {
return err
}
r.GroupProtocols[protocol.Name] = protocol.Metadata
r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
}
return nil
}
func (r *JoinGroupRequest) key() int16 {
return 11
}
func (r *JoinGroupRequest) version() int16 {
return r.Version
}
func (r *JoinGroupRequest) headerVersion() int16 {
return 1
}
func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 2:
return V0_11_0_0
case 1:
return V0_10_1_0
default:
return V0_9_0_0
}
}
func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
Name: name,
Metadata: metadata,
})
}
func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
bin, err := encode(metadata, nil)
if err != nil {
return err
}
r.AddGroupProtocol(name, bin)
return nil
}
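
As a usage sketch (group, topic, and protocol names are placeholders): populate the request through the ordered helper, so that encode writes OrderedGroupProtocols rather than the deprecated map.

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.JoinGroupRequest{
		Version:        1,
		GroupId:        "example-group",
		SessionTimeout: 30000, // milliseconds
		MemberId:       "",    // empty on first join; the broker assigns one
		ProtocolType:   "consumer",
	}
	// Leaving GroupProtocols empty makes encode() use OrderedGroupProtocols.
	if err := req.AddGroupProtocolMetadata("range", &sarama.ConsumerGroupMemberMetadata{
		Topics: []string{"example-topic"},
	}); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(len(req.OrderedGroupProtocols)) // 1
}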

vendor/github.com/Shopify/sarama/join_group_response.go generated vendored Normal file
@@ -0,0 +1,139 @@
package sarama
type JoinGroupResponse struct {
Version int16
ThrottleTime int32
Err KError
GenerationId int32
GroupProtocol string
LeaderId string
MemberId string
Members map[string][]byte
}
func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
for id, bin := range r.Members {
meta := new(ConsumerGroupMemberMetadata)
if err := decode(bin, meta); err != nil {
return nil, err
}
members[id] = *meta
}
return members, nil
}
func (r *JoinGroupResponse) encode(pe packetEncoder) error {
if r.Version >= 2 {
pe.putInt32(r.ThrottleTime)
}
pe.putInt16(int16(r.Err))
pe.putInt32(r.GenerationId)
if err := pe.putString(r.GroupProtocol); err != nil {
return err
}
if err := pe.putString(r.LeaderId); err != nil {
return err
}
if err := pe.putString(r.MemberId); err != nil {
return err
}
if err := pe.putArrayLength(len(r.Members)); err != nil {
return err
}
for memberId, memberMetadata := range r.Members {
if err := pe.putString(memberId); err != nil {
return err
}
if err := pe.putBytes(memberMetadata); err != nil {
return err
}
}
return nil
}
func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if version >= 2 {
if r.ThrottleTime, err = pd.getInt32(); err != nil {
return
}
}
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
if r.GenerationId, err = pd.getInt32(); err != nil {
return
}
if r.GroupProtocol, err = pd.getString(); err != nil {
return
}
if r.LeaderId, err = pd.getString(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.Members = make(map[string][]byte)
for i := 0; i < n; i++ {
memberId, err := pd.getString()
if err != nil {
return err
}
memberMetadata, err := pd.getBytes()
if err != nil {
return err
}
r.Members[memberId] = memberMetadata
}
return nil
}
func (r *JoinGroupResponse) key() int16 {
return 11
}
func (r *JoinGroupResponse) version() int16 {
return r.Version
}
func (r *JoinGroupResponse) headerVersion() int16 {
return 0
}
func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 2:
return V0_11_0_0
case 1:
return V0_10_1_0
default:
return V0_9_0_0
}
}

vendor/github.com/Shopify/sarama/kerberos_client.go generated vendored Normal file
@@ -0,0 +1,51 @@
package sarama
import (
krb5client "gopkg.in/jcmturner/gokrb5.v7/client"
krb5config "gopkg.in/jcmturner/gokrb5.v7/config"
"gopkg.in/jcmturner/gokrb5.v7/keytab"
"gopkg.in/jcmturner/gokrb5.v7/types"
)
type KerberosGoKrb5Client struct {
krb5client.Client
}
func (c *KerberosGoKrb5Client) Domain() string {
return c.Credentials.Domain()
}
func (c *KerberosGoKrb5Client) CName() types.PrincipalName {
return c.Credentials.CName()
}
/*
*
* Creates a Kerberos client used to obtain TGT and TGS tokens.
* It uses the gokrb5 library, a pure Go Kerberos client with
* some GSS-API capabilities and SPNEGO support. Kafka does not use SPNEGO;
* it uses a pure Kerberos 5 solution (RFC-4121 and RFC-4120).
*
*/
func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) {
cfg, err := krb5config.Load(config.KerberosConfigPath)
if err != nil {
return nil, err
}
return createClient(config, cfg)
}
func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) {
var client *krb5client.Client
if config.AuthType == KRB5_KEYTAB_AUTH {
kt, err := keytab.Load(config.KeyTabPath)
if err != nil {
return nil, err
}
client = krb5client.NewClientWithKeytab(config.Username, config.Realm, kt, cfg)
} else {
client = krb5client.NewClientWithPassword(config.Username,
config.Realm, config.Password, cfg)
}
return &KerberosGoKrb5Client{*client}, nil
}
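
A short sketch of constructing the client with password-based auth, the else branch of createClient above (principal, password, and realm are placeholders):

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewKerberosClient(&sarama.GSSAPIConfig{
		AuthType:           sarama.KRB5_USER_AUTH, // selects NewClientWithPassword
		KerberosConfigPath: "/etc/krb5.conf",
		Username:           "kafka-client", // placeholder principal
		Password:           "secret",       // placeholder password
		Realm:              "EXAMPLE.COM",  // placeholder realm
	})
	if err != nil {
		log.Fatal(err)
	}
	defer client.Destroy()

	if err := client.Login(); err != nil {
		log.Fatal(err)
	}
}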

vendor/github.com/Shopify/sarama/leave_group_request.go generated vendored Normal file
@@ -0,0 +1,44 @@
package sarama
type LeaveGroupRequest struct {
GroupId string
MemberId string
}
func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
if err := pe.putString(r.MemberId); err != nil {
return err
}
return nil
}
func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
return nil
}
func (r *LeaveGroupRequest) key() int16 {
return 13
}
func (r *LeaveGroupRequest) version() int16 {
return 0
}
func (r *LeaveGroupRequest) headerVersion() int16 {
return 1
}
func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}

vendor/github.com/Shopify/sarama/leave_group_response.go generated vendored Normal file
@@ -0,0 +1,36 @@
package sarama
type LeaveGroupResponse struct {
Err KError
}
func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
return nil
}
func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
return nil
}
func (r *LeaveGroupResponse) key() int16 {
return 13
}
func (r *LeaveGroupResponse) version() int16 {
return 0
}
func (r *LeaveGroupResponse) headerVersion() int16 {
return 0
}
func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}

vendor/github.com/Shopify/sarama/length_field.go generated vendored Normal file
@@ -0,0 +1,99 @@
package sarama
import (
"encoding/binary"
"sync"
)
// lengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
type lengthField struct {
startOffset int
length int32
}
var lengthFieldPool = sync.Pool{}
func acquireLengthField() *lengthField {
val := lengthFieldPool.Get()
if val != nil {
return val.(*lengthField)
}
return &lengthField{}
}
func releaseLengthField(m *lengthField) {
lengthFieldPool.Put(m)
}
func (l *lengthField) decode(pd packetDecoder) error {
var err error
l.length, err = pd.getInt32()
if err != nil {
return err
}
if l.length > int32(pd.remaining()) {
return ErrInsufficientData
}
return nil
}
func (l *lengthField) saveOffset(in int) {
l.startOffset = in
}
func (l *lengthField) reserveLength() int {
return 4
}
func (l *lengthField) run(curOffset int, buf []byte) error {
binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
return nil
}
func (l *lengthField) check(curOffset int, buf []byte) error {
if int32(curOffset-l.startOffset-4) != l.length {
return PacketDecodingError{"length field invalid"}
}
return nil
}
type varintLengthField struct {
startOffset int
length int64
}
func (l *varintLengthField) decode(pd packetDecoder) error {
var err error
l.length, err = pd.getVarint()
return err
}
func (l *varintLengthField) saveOffset(in int) {
l.startOffset = in
}
func (l *varintLengthField) adjustLength(currOffset int) int {
oldFieldSize := l.reserveLength()
l.length = int64(currOffset - l.startOffset - oldFieldSize)
return l.reserveLength() - oldFieldSize
}
func (l *varintLengthField) reserveLength() int {
var tmp [binary.MaxVarintLen64]byte
return binary.PutVarint(tmp[:], l.length)
}
func (l *varintLengthField) run(curOffset int, buf []byte) error {
binary.PutVarint(buf[l.startOffset:], l.length)
return nil
}
func (l *varintLengthField) check(curOffset int, buf []byte) error {
if int64(curOffset-l.startOffset-l.reserveLength()) != l.length {
return PacketDecodingError{"length field invalid"}
}
return nil
}
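
To make the offset arithmetic concrete, a standalone sketch of what lengthField.run writes: a big-endian count of the bytes that follow the 4-byte field itself.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	payload := []byte("hello") // whatever was encoded after the length field
	startOffset := 0

	buf := make([]byte, 4+len(payload))
	copy(buf[4:], payload)

	// Mirrors run(): curOffset-startOffset-4 bytes follow the field.
	curOffset := len(buf)
	binary.BigEndian.PutUint32(buf[startOffset:], uint32(curOffset-startOffset-4))

	fmt.Println(binary.BigEndian.Uint32(buf[:4])) // 5
}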

vendor/github.com/Shopify/sarama/list_groups_request.go generated vendored Normal file
@@ -0,0 +1,28 @@
package sarama
type ListGroupsRequest struct {
}
func (r *ListGroupsRequest) encode(pe packetEncoder) error {
return nil
}
func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
return nil
}
func (r *ListGroupsRequest) key() int16 {
return 16
}
func (r *ListGroupsRequest) version() int16 {
return 0
}
func (r *ListGroupsRequest) headerVersion() int16 {
return 1
}
func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}

vendor/github.com/Shopify/sarama/list_groups_response.go generated vendored Normal file
@@ -0,0 +1,73 @@
package sarama
type ListGroupsResponse struct {
Err KError
Groups map[string]string
}
func (r *ListGroupsResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
if err := pe.putArrayLength(len(r.Groups)); err != nil {
return err
}
for groupId, protocolType := range r.Groups {
if err := pe.putString(groupId); err != nil {
return err
}
if err := pe.putString(protocolType); err != nil {
return err
}
}
return nil
}
func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.Groups = make(map[string]string)
for i := 0; i < n; i++ {
groupId, err := pd.getString()
if err != nil {
return err
}
protocolType, err := pd.getString()
if err != nil {
return err
}
r.Groups[groupId] = protocolType
}
return nil
}
func (r *ListGroupsResponse) key() int16 {
return 16
}
func (r *ListGroupsResponse) version() int16 {
return 0
}
func (r *ListGroupsResponse) headerVersion() int16 {
return 0
}
func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}

vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go generated vendored Normal file
@@ -0,0 +1,98 @@
package sarama
type ListPartitionReassignmentsRequest struct {
TimeoutMs int32
blocks map[string][]int32
Version int16
}
func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error {
pe.putInt32(r.TimeoutMs)
pe.putCompactArrayLength(len(r.blocks))
for topic, partitions := range r.blocks {
if err := pe.putCompactString(topic); err != nil {
return err
}
if err := pe.putCompactInt32Array(partitions); err != nil {
return err
}
pe.putEmptyTaggedFieldArray()
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.TimeoutMs, err = pd.getInt32(); err != nil {
return err
}
topicCount, err := pd.getCompactArrayLength()
if err != nil {
return err
}
if topicCount > 0 {
r.blocks = make(map[string][]int32)
for i := 0; i < topicCount; i++ {
topic, err := pd.getCompactString()
if err != nil {
return err
}
partitionCount, err := pd.getCompactArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make([]int32, partitionCount)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
r.blocks[topic][j] = partition
}
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
}
}
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
return
}
func (r *ListPartitionReassignmentsRequest) key() int16 {
return 46
}
func (r *ListPartitionReassignmentsRequest) version() int16 {
return r.Version
}
func (r *ListPartitionReassignmentsRequest) headerVersion() int16 {
return 2
}
func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
return V2_4_0_0
}
func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) {
if r.blocks == nil {
r.blocks = make(map[string][]int32)
}
if r.blocks[topic] == nil {
r.blocks[topic] = partitionIDs
}
}
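
A usage sketch of AddBlock (topic name and partition IDs are placeholders); note that it only sets a topic's partition list on first use:

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.ListPartitionReassignmentsRequest{TimeoutMs: 10000}
	req.AddBlock("example-topic", []int32{0, 1, 2})
	req.AddBlock("example-topic", []int32{3}) // ignored: the topic already has a block
	fmt.Println(req.TimeoutMs)
}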

vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go generated vendored Normal file
@@ -0,0 +1,169 @@
package sarama
type PartitionReplicaReassignmentsStatus struct {
Replicas []int32
AddingReplicas []int32
RemovingReplicas []int32
}
func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error {
if err := pe.putCompactInt32Array(b.Replicas); err != nil {
return err
}
if err := pe.putCompactInt32Array(b.AddingReplicas); err != nil {
return err
}
if err := pe.putCompactInt32Array(b.RemovingReplicas); err != nil {
return err
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) {
if b.Replicas, err = pd.getCompactInt32Array(); err != nil {
return err
}
if b.AddingReplicas, err = pd.getCompactInt32Array(); err != nil {
return err
}
if b.RemovingReplicas, err = pd.getCompactInt32Array(); err != nil {
return err
}
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
return err
}
type ListPartitionReassignmentsResponse struct {
Version int16
ThrottleTimeMs int32
ErrorCode KError
ErrorMessage *string
TopicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus
}
func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) {
if r.TopicStatus == nil {
r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus)
}
partitions := r.TopicStatus[topic]
if partitions == nil {
partitions = make(map[int32]*PartitionReplicaReassignmentsStatus)
r.TopicStatus[topic] = partitions
}
partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas}
}
func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error {
pe.putInt32(r.ThrottleTimeMs)
pe.putInt16(int16(r.ErrorCode))
if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
return err
}
pe.putCompactArrayLength(len(r.TopicStatus))
for topic, partitions := range r.TopicStatus {
if err := pe.putCompactString(topic); err != nil {
return err
}
pe.putCompactArrayLength(len(partitions))
for partition, block := range partitions {
pe.putInt32(partition)
if err := block.encode(pe); err != nil {
return err
}
}
pe.putEmptyTaggedFieldArray()
}
pe.putEmptyTaggedFieldArray()
return nil
}
func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
return err
}
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.ErrorCode = KError(kerr)
if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
return err
}
numTopics, err := pd.getCompactArrayLength()
if err != nil {
return err
}
r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics)
for i := 0; i < numTopics; i++ {
topic, err := pd.getCompactString()
if err != nil {
return err
}
ongoingPartitionReassignments, err := pd.getCompactArrayLength()
if err != nil {
return err
}
r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments)
for j := 0; j < ongoingPartitionReassignments; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
block := &PartitionReplicaReassignmentsStatus{}
if err := block.decode(pd); err != nil {
return err
}
r.TopicStatus[topic][partition] = block
}
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
}
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
return err
}
return nil
}
func (r *ListPartitionReassignmentsResponse) key() int16 {
return 46
}
func (r *ListPartitionReassignmentsResponse) version() int16 {
return r.Version
}
func (r *ListPartitionReassignmentsResponse) headerVersion() int16 {
return 1
}
func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
return V2_4_0_0
}

vendor/github.com/Shopify/sarama/message.go generated vendored Normal file
@@ -0,0 +1,174 @@
package sarama
import (
"fmt"
"time"
)
const (
//CompressionNone no compression
CompressionNone CompressionCodec = iota
//CompressionGZIP compression using GZIP
CompressionGZIP
//CompressionSnappy compression using snappy
CompressionSnappy
//CompressionLZ4 compression using LZ4
CompressionLZ4
//CompressionZSTD compression using ZSTD
CompressionZSTD
// The lowest 3 bits contain the compression codec used for the message
compressionCodecMask int8 = 0x07
// Bit 3 set for "LogAppend" timestamps
timestampTypeMask = 0x08
// CompressionLevelDefault is the constant to use in CompressionLevel
// to have the default compression level for any codec. The value is picked
// so that it does not collide with any existing compression levels.
CompressionLevelDefault = -1000
)
// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
type CompressionCodec int8
func (cc CompressionCodec) String() string {
return []string{
"none",
"gzip",
"snappy",
"lz4",
"zstd",
}[int(cc)]
}
//Message is a kafka message type
type Message struct {
Codec CompressionCodec // codec used to compress the message contents
CompressionLevel int // compression level
LogAppendTime bool // the used timestamp is LogAppendTime
Key []byte // the message key, may be nil
Value []byte // the message contents
Set *MessageSet // the message set a message might wrap
Version int8 // v1 requires Kafka 0.10
Timestamp time.Time // the timestamp of the message (version 1+ only)
compressedCache []byte
compressedSize int // used for computing the compression ratio metrics
}
func (m *Message) encode(pe packetEncoder) error {
pe.push(newCRC32Field(crcIEEE))
pe.putInt8(m.Version)
attributes := int8(m.Codec) & compressionCodecMask
if m.LogAppendTime {
attributes |= timestampTypeMask
}
pe.putInt8(attributes)
if m.Version >= 1 {
if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil {
return err
}
}
err := pe.putBytes(m.Key)
if err != nil {
return err
}
var payload []byte
if m.compressedCache != nil {
payload = m.compressedCache
m.compressedCache = nil
} else if m.Value != nil {
payload, err = compress(m.Codec, m.CompressionLevel, m.Value)
if err != nil {
return err
}
m.compressedCache = payload
// Keep in mind the compressed payload size for metric gathering
m.compressedSize = len(payload)
}
if err = pe.putBytes(payload); err != nil {
return err
}
return pe.pop()
}
func (m *Message) decode(pd packetDecoder) (err error) {
crc32Decoder := acquireCrc32Field(crcIEEE)
defer releaseCrc32Field(crc32Decoder)
err = pd.push(crc32Decoder)
if err != nil {
return err
}
m.Version, err = pd.getInt8()
if err != nil {
return err
}
if m.Version > 1 {
return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
}
attribute, err := pd.getInt8()
if err != nil {
return err
}
m.Codec = CompressionCodec(attribute & compressionCodecMask)
m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask
if m.Version == 1 {
if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
return err
}
}
m.Key, err = pd.getBytes()
if err != nil {
return err
}
m.Value, err = pd.getBytes()
if err != nil {
return err
}
// Required for deep equal assertion during tests but might be useful
// for future metrics about the compression ratio in fetch requests
m.compressedSize = len(m.Value)
switch m.Codec {
case CompressionNone:
// nothing to do
default:
if m.Value == nil {
break
}
m.Value, err = decompress(m.Codec, m.Value)
if err != nil {
return err
}
if err := m.decodeSet(); err != nil {
return err
}
}
return pd.pop()
}
// decodes a message set from a previously encoded bulk-message
func (m *Message) decodeSet() (err error) {
pd := realDecoder{raw: m.Value}
m.Set = &MessageSet{}
return m.Set.decode(&pd)
}
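
A standalone sketch of the attribute-byte layout used by encode and decode above: the codec sits in the low three bits, and bit 3 flags LogAppendTime.

package main

import "fmt"

func main() {
	const (
		compressionCodecMask int8 = 0x07
		timestampTypeMask    int8 = 0x08
	)
	// Attributes as found on the wire: snappy (codec 2) with the LogAppendTime bit set.
	attributes := int8(0x02 | 0x08)

	codec := attributes & compressionCodecMask
	logAppendTime := attributes&timestampTypeMask == timestampTypeMask

	fmt.Println(codec, logAppendTime) // 2 true
}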

vendor/github.com/Shopify/sarama/message_set.go generated vendored Normal file
@@ -0,0 +1,111 @@
package sarama
type MessageBlock struct {
Offset int64
Msg *Message
}
// Messages is a convenience helper which returns the messages wrapped in
// this block's inner message set, or the block itself if there is no inner set
func (msb *MessageBlock) Messages() []*MessageBlock {
if msb.Msg.Set != nil {
return msb.Msg.Set.Messages
}
return []*MessageBlock{msb}
}
func (msb *MessageBlock) encode(pe packetEncoder) error {
pe.putInt64(msb.Offset)
pe.push(&lengthField{})
err := msb.Msg.encode(pe)
if err != nil {
return err
}
return pe.pop()
}
func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
if msb.Offset, err = pd.getInt64(); err != nil {
return err
}
lengthDecoder := acquireLengthField()
defer releaseLengthField(lengthDecoder)
if err = pd.push(lengthDecoder); err != nil {
return err
}
msb.Msg = new(Message)
if err = msb.Msg.decode(pd); err != nil {
return err
}
if err = pd.pop(); err != nil {
return err
}
return nil
}
type MessageSet struct {
PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
OverflowMessage bool // whether the set on the wire contained an overflow message
Messages []*MessageBlock
}
func (ms *MessageSet) encode(pe packetEncoder) error {
for i := range ms.Messages {
err := ms.Messages[i].encode(pe)
if err != nil {
return err
}
}
return nil
}
func (ms *MessageSet) decode(pd packetDecoder) (err error) {
ms.Messages = nil
for pd.remaining() > 0 {
magic, err := magicValue(pd)
if err != nil {
if err == ErrInsufficientData {
ms.PartialTrailingMessage = true
return nil
}
return err
}
if magic > 1 {
return nil
}
msb := new(MessageBlock)
err = msb.decode(pd)
switch err {
case nil:
ms.Messages = append(ms.Messages, msb)
case ErrInsufficientData:
// As an optimization the server is allowed to return a partial message at the
// end of the message set. Clients should handle this case. So we just ignore such things.
if msb.Offset == -1 {
// This is an overflow message caused by chunked down conversion
ms.OverflowMessage = true
} else {
ms.PartialTrailingMessage = true
}
return nil
default:
return err
}
}
return nil
}
func (ms *MessageSet) addMessage(msg *Message) {
block := new(MessageBlock)
block.Msg = msg
ms.Messages = append(ms.Messages, block)
}

vendor/github.com/Shopify/sarama/metadata_request.go generated vendored Normal file
@@ -0,0 +1,85 @@
package sarama
type MetadataRequest struct {
Version int16
Topics []string
AllowAutoTopicCreation bool
}
func (r *MetadataRequest) encode(pe packetEncoder) error {
if r.Version < 0 || r.Version > 5 {
return PacketEncodingError{"invalid or unsupported MetadataRequest version field"}
}
if r.Version == 0 || len(r.Topics) > 0 {
err := pe.putArrayLength(len(r.Topics))
if err != nil {
return err
}
for i := range r.Topics {
err = pe.putString(r.Topics[i])
if err != nil {
return err
}
}
} else {
pe.putInt32(-1)
}
if r.Version > 3 {
pe.putBool(r.AllowAutoTopicCreation)
}
return nil
}
func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
r.Version = version
size, err := pd.getInt32()
if err != nil {
return err
}
if size > 0 {
r.Topics = make([]string, size)
for i := range r.Topics {
topic, err := pd.getString()
if err != nil {
return err
}
r.Topics[i] = topic
}
}
if r.Version > 3 {
autoCreation, err := pd.getBool()
if err != nil {
return err
}
r.AllowAutoTopicCreation = autoCreation
}
return nil
}
func (r *MetadataRequest) key() int16 {
return 3
}
func (r *MetadataRequest) version() int16 {
return r.Version
}
func (r *MetadataRequest) headerVersion() int16 {
return 1
}
func (r *MetadataRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_10_0_0
case 2:
return V0_10_1_0
case 3, 4:
return V0_11_0_0
case 5:
return V1_0_0_0
default:
return MinVersion
}
}

vendor/github.com/Shopify/sarama/metadata_response.go generated vendored Normal file
@@ -0,0 +1,325 @@
package sarama
type PartitionMetadata struct {
Err KError
ID int32
Leader int32
Replicas []int32
Isr []int32
OfflineReplicas []int32
}
func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
pm.Err = KError(tmp)
pm.ID, err = pd.getInt32()
if err != nil {
return err
}
pm.Leader, err = pd.getInt32()
if err != nil {
return err
}
pm.Replicas, err = pd.getInt32Array()
if err != nil {
return err
}
pm.Isr, err = pd.getInt32Array()
if err != nil {
return err
}
if version >= 5 {
pm.OfflineReplicas, err = pd.getInt32Array()
if err != nil {
return err
}
}
return nil
}
func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(pm.Err))
pe.putInt32(pm.ID)
pe.putInt32(pm.Leader)
err = pe.putInt32Array(pm.Replicas)
if err != nil {
return err
}
err = pe.putInt32Array(pm.Isr)
if err != nil {
return err
}
if version >= 5 {
err = pe.putInt32Array(pm.OfflineReplicas)
if err != nil {
return err
}
}
return nil
}
type TopicMetadata struct {
Err KError
Name string
IsInternal bool // Only valid for Version >= 1
Partitions []*PartitionMetadata
}
func (tm *TopicMetadata) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
tm.Err = KError(tmp)
tm.Name, err = pd.getString()
if err != nil {
return err
}
if version >= 1 {
tm.IsInternal, err = pd.getBool()
if err != nil {
return err
}
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
tm.Partitions = make([]*PartitionMetadata, n)
for i := 0; i < n; i++ {
tm.Partitions[i] = new(PartitionMetadata)
err = tm.Partitions[i].decode(pd, version)
if err != nil {
return err
}
}
return nil
}
func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(tm.Err))
err = pe.putString(tm.Name)
if err != nil {
return err
}
if version >= 1 {
pe.putBool(tm.IsInternal)
}
err = pe.putArrayLength(len(tm.Partitions))
if err != nil {
return err
}
for _, pm := range tm.Partitions {
err = pm.encode(pe, version)
if err != nil {
return err
}
}
return nil
}
type MetadataResponse struct {
Version int16
ThrottleTimeMs int32
Brokers []*Broker
ClusterID *string
ControllerID int32
Topics []*TopicMetadata
}
func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if version >= 3 {
r.ThrottleTimeMs, err = pd.getInt32()
if err != nil {
return err
}
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Brokers = make([]*Broker, n)
for i := 0; i < n; i++ {
r.Brokers[i] = new(Broker)
err = r.Brokers[i].decode(pd, version)
if err != nil {
return err
}
}
if version >= 2 {
r.ClusterID, err = pd.getNullableString()
if err != nil {
return err
}
}
if version >= 1 {
r.ControllerID, err = pd.getInt32()
if err != nil {
return err
}
} else {
r.ControllerID = -1
}
n, err = pd.getArrayLength()
if err != nil {
return err
}
r.Topics = make([]*TopicMetadata, n)
for i := 0; i < n; i++ {
r.Topics[i] = new(TopicMetadata)
err = r.Topics[i].decode(pd, version)
if err != nil {
return err
}
}
return nil
}
func (r *MetadataResponse) encode(pe packetEncoder) error {
if r.Version >= 3 {
pe.putInt32(r.ThrottleTimeMs)
}
err := pe.putArrayLength(len(r.Brokers))
if err != nil {
return err
}
for _, broker := range r.Brokers {
err = broker.encode(pe, r.Version)
if err != nil {
return err
}
}
if r.Version >= 2 {
err := pe.putNullableString(r.ClusterID)
if err != nil {
return err
}
}
if r.Version >= 1 {
pe.putInt32(r.ControllerID)
}
err = pe.putArrayLength(len(r.Topics))
if err != nil {
return err
}
for _, tm := range r.Topics {
err = tm.encode(pe, r.Version)
if err != nil {
return err
}
}
return nil
}
func (r *MetadataResponse) key() int16 {
return 3
}
func (r *MetadataResponse) version() int16 {
return r.Version
}
func (r *MetadataResponse) headerVersion() int16 {
return 0
}
func (r *MetadataResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_10_0_0
case 2:
return V0_10_1_0
case 3, 4:
return V0_11_0_0
case 5:
return V1_0_0_0
default:
return MinVersion
}
}
// testing API
func (r *MetadataResponse) AddBroker(addr string, id int32) {
r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
}
func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
var tmatch *TopicMetadata
for _, tm := range r.Topics {
if tm.Name == topic {
tmatch = tm
goto foundTopic
}
}
tmatch = new(TopicMetadata)
tmatch.Name = topic
r.Topics = append(r.Topics, tmatch)
foundTopic:
tmatch.Err = err
return tmatch
}
func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) {
tmatch := r.AddTopic(topic, ErrNoError)
var pmatch *PartitionMetadata
for _, pm := range tmatch.Partitions {
if pm.ID == partition {
pmatch = pm
goto foundPartition
}
}
pmatch = new(PartitionMetadata)
pmatch.ID = partition
tmatch.Partitions = append(tmatch.Partitions, pmatch)
foundPartition:
pmatch.Leader = brokerID
pmatch.Replicas = replicas
pmatch.Isr = isr
pmatch.OfflineReplicas = offline
pmatch.Err = err
}
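
A usage sketch of this testing API (broker address, topic, and IDs are placeholders); AddTopicPartition upserts both the topic entry and the partition entry:

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	res := new(sarama.MetadataResponse)
	res.AddBroker("localhost:9092", 1)
	// Creates the topic on first use, then upserts partition 0 with broker 1 as leader.
	res.AddTopicPartition("example-topic", 0, 1, []int32{1}, []int32{1}, nil, sarama.ErrNoError)
	fmt.Println(len(res.Brokers), len(res.Topics)) // 1 1
}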

vendor/github.com/Shopify/sarama/metrics.go generated vendored Normal file
@@ -0,0 +1,43 @@
package sarama
import (
"fmt"
"strings"
"github.com/rcrowley/go-metrics"
)
// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
const (
metricsReservoirSize = 1028
metricsAlphaFactor = 0.015
)
func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
return r.GetOrRegister(name, func() metrics.Histogram {
return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
}).(metrics.Histogram)
}
func getMetricNameForBroker(name string, broker *Broker) string {
// Use broker id like the Java client as it does not contain '.' or ':' characters that
// can be interpreted as special characters by monitoring tools (e.g. Graphite)
return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
}
func getMetricNameForTopic(name string, topic string) string {
// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
// cf. KAFKA-1902 and KAFKA-2337
return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
}
func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
}
func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
}
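
For example, the dot-to-underscore substitution keeps Graphite from splitting a topic name into hierarchy levels; a sketch using a hypothetical meter name and topic:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirrors getMetricNameForTopic for a hypothetical meter name.
	name, topic := "record-send-rate", "orders.eu"
	fmt.Println(fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1)))
	// Output: record-send-rate-for-topic-orders_eu
}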

vendor/github.com/Shopify/sarama/mockbroker.go generated vendored Normal file
@@ -0,0 +1,415 @@
package sarama
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"net"
"reflect"
"strconv"
"sync"
"time"
"github.com/davecgh/go-spew/spew"
)
const (
expectationTimeout = 500 * time.Millisecond
)
type GSSApiHandlerFunc func([]byte) []byte
type requestHandlerFunc func(req *request) (res encoderWithHeader)
// RequestNotifierFunc is invoked when a mock broker processes a request successfully
// and provides the number of bytes read and written.
type RequestNotifierFunc func(bytesRead, bytesWritten int)
// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
// to facilitate testing of higher level or specialized consumers and producers
// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
// but rather provides a facility to do that. It takes care of the TCP
// transport, request unmarshaling, and response marshaling, and makes it the
// test writer's responsibility to program MockBroker behaviour that is
// correct according to the Kafka API protocol.
//
// MockBroker is implemented as a TCP server listening on a kernel-selected
// localhost port that can accept many connections. It reads Kafka requests
// from that connection and returns responses programmed by the SetHandlerByMap
// function. If a MockBroker receives a request that it has no programmed
// response for, then it returns nothing and the request times out.
//
// A set of MockRequest builders to define mappings used by MockBroker is
// provided by Sarama. But users can develop MockRequests of their own and use
// them along with or instead of the standard ones.
//
// When running tests with MockBroker it is strongly recommended to specify
// a timeout to `go test` so that if the broker hangs waiting for a response,
// the test panics.
//
// It is not necessary to prefix message length or correlation ID to your
// response bytes, the server does that automatically as a convenience.
type MockBroker struct {
brokerID int32
port int32
closing chan none
stopper chan none
expectations chan encoderWithHeader
listener net.Listener
t TestReporter
latency time.Duration
handler requestHandlerFunc
notifier RequestNotifierFunc
history []RequestResponse
lock sync.Mutex
gssApiHandler GSSApiHandlerFunc
}
// RequestResponse represents a Request/Response pair processed by MockBroker.
type RequestResponse struct {
Request protocolBody
Response encoder
}
// SetLatency makes broker pause for the specified period every time before
// replying.
func (b *MockBroker) SetLatency(latency time.Duration) {
b.latency = latency
}
// SetHandlerByMap defines mapping of Request types to MockResponses. When a
// request is received by the broker, it looks up the request type in the map
// and uses the found MockResponse instance to generate an appropriate reply.
// If the request type is not found in the map then nothing is sent.
func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
b.setHandler(func(req *request) (res encoderWithHeader) {
reqTypeName := reflect.TypeOf(req.body).Elem().Name()
mockResponse := handlerMap[reqTypeName]
if mockResponse == nil {
return nil
}
return mockResponse.For(req.body)
})
}
// SetNotifier sets a function that will get invoked whenever a request has been
// processed successfully, providing the number of bytes read and written.
func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
b.lock.Lock()
b.notifier = notifier
b.lock.Unlock()
}
// BrokerID returns broker ID assigned to the broker.
func (b *MockBroker) BrokerID() int32 {
return b.brokerID
}
// History returns a slice of RequestResponse pairs in the order they were
// processed by the broker. Note that in case of multiple connections to the
// broker the order expected by a test can be different from the order recorded
// in the history, unless some synchronization is implemented in the test.
func (b *MockBroker) History() []RequestResponse {
b.lock.Lock()
history := make([]RequestResponse, len(b.history))
copy(history, b.history)
b.lock.Unlock()
return history
}
// Port returns the TCP port number the broker is listening for requests on.
func (b *MockBroker) Port() int32 {
return b.port
}
// Addr returns the broker connection string in the form "<address>:<port>".
func (b *MockBroker) Addr() string {
return b.listener.Addr().String()
}
// Close terminates the broker blocking until it stops internal goroutines and
// releases all resources.
func (b *MockBroker) Close() {
close(b.expectations)
if len(b.expectations) > 0 {
buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
for e := range b.expectations {
_, _ = buf.WriteString(spew.Sdump(e))
}
b.t.Error(buf.String())
}
close(b.closing)
<-b.stopper
}
// setHandler sets the specified function as the request handler. Whenever
// a mock broker reads a request from the wire it passes the request to the
// function and sends back whatever the handler function returns.
func (b *MockBroker) setHandler(handler requestHandlerFunc) {
b.lock.Lock()
b.handler = handler
b.lock.Unlock()
}
func (b *MockBroker) serverLoop() {
defer close(b.stopper)
var err error
var conn net.Conn
go func() {
<-b.closing
err := b.listener.Close()
if err != nil {
b.t.Error(err)
}
}()
wg := &sync.WaitGroup{}
i := 0
for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
wg.Add(1)
go b.handleRequests(conn, i, wg)
i++
}
wg.Wait()
Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
}
func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) {
b.gssApiHandler = handler
}
func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) {
var (
bytesRead int
lengthBytes = make([]byte, 4)
)
if _, err := io.ReadFull(r, lengthBytes); err != nil {
return nil, err
}
bytesRead += len(lengthBytes)
length := int32(binary.BigEndian.Uint32(lengthBytes))
if length <= 4 || length > MaxRequestSize {
return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
}
encodedReq := make([]byte, length)
if _, err := io.ReadFull(r, encodedReq); err != nil {
return nil, err
}
bytesRead += len(encodedReq)
fullBytes := append(lengthBytes, encodedReq...)
return fullBytes, nil
}
func (b *MockBroker) isGSSAPI(buffer []byte) bool {
return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04})
}
func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) {
defer wg.Done()
defer func() {
_ = conn.Close()
}()
Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
var err error
abort := make(chan none)
defer close(abort)
go func() {
select {
case <-b.closing:
_ = conn.Close()
case <-abort:
}
}()
var bytesWritten int
var bytesRead int
for {
buffer, err := b.readToBytes(conn)
if err != nil {
Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
b.serverError(err)
break
}
bytesWritten = 0
if !b.isGSSAPI(buffer) {
req, br, err := decodeRequest(bytes.NewReader(buffer))
bytesRead = br
if err != nil {
Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
b.serverError(err)
break
}
if b.latency > 0 {
time.Sleep(b.latency)
}
b.lock.Lock()
res := b.handler(req)
b.history = append(b.history, RequestResponse{req.body, res})
b.lock.Unlock()
if res == nil {
Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
continue
}
Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
encodedRes, err := encode(res, nil)
if err != nil {
b.serverError(err)
break
}
if len(encodedRes) == 0 {
b.lock.Lock()
if b.notifier != nil {
b.notifier(bytesRead, 0)
}
b.lock.Unlock()
continue
}
resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes)))
if _, err = conn.Write(resHeader); err != nil {
b.serverError(err)
break
}
if _, err = conn.Write(encodedRes); err != nil {
b.serverError(err)
break
}
bytesWritten = len(resHeader) + len(encodedRes)
} else {
// GSSAPI is not part of the Kafka protocol, but is supported for authentication purposes.
// History is not kept for this kind of request, as it is only used to test the GSSAPI authentication mechanism.
b.lock.Lock()
res := b.gssApiHandler(buffer)
b.lock.Unlock()
if res == nil {
Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer))
continue
}
if _, err = conn.Write(res); err != nil {
b.serverError(err)
break
}
bytesWritten = len(res)
}
b.lock.Lock()
if b.notifier != nil {
b.notifier(bytesRead, bytesWritten)
}
b.lock.Unlock()
}
Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
}
func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte {
headerLength := uint32(8)
if headerVersion >= 1 {
headerLength = 9
}
resHeader := make([]byte, headerLength)
binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4)
binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId))
if headerVersion >= 1 {
binary.PutUvarint(resHeader[8:], 0)
}
return resHeader
}
func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) {
select {
case res, ok := <-b.expectations:
if !ok {
return nil
}
return res
case <-time.After(expectationTimeout):
return nil
}
}
func (b *MockBroker) serverError(err error) {
isConnectionClosedError := false
if _, ok := err.(*net.OpError); ok {
isConnectionClosedError = true
} else if err == io.EOF {
isConnectionClosedError = true
} else if err.Error() == "use of closed network connection" {
isConnectionClosedError = true
}
if isConnectionClosedError {
return
}
b.t.Errorf(err.Error())
}
// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
// test framework and a channel of responses to use. If an error occurs it is
// simply logged to the TestReporter and the broker exits.
func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
return NewMockBrokerAddr(t, brokerID, "localhost:0")
}
// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
// it rather than just some ephemeral port.
func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
listener, err := net.Listen("tcp", addr)
if err != nil {
t.Fatal(err)
}
return NewMockBrokerListener(t, brokerID, listener)
}
// NewMockBrokerListener behaves like NewMockBrokerAddr but accepts connections on the listener specified.
func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker {
var err error
broker := &MockBroker{
closing: make(chan none),
stopper: make(chan none),
t: t,
brokerID: brokerID,
expectations: make(chan encoderWithHeader, 512),
listener: listener,
}
broker.handler = broker.defaultRequestHandler
Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
if err != nil {
t.Fatal(err)
}
tmp, err := strconv.ParseInt(portStr, 10, 32)
if err != nil {
t.Fatal(err)
}
broker.port = int32(tmp)
go broker.serverLoop()
return broker
}
func (b *MockBroker) Returns(e encoderWithHeader) {
b.expectations <- e
}

vendor/github.com/Shopify/sarama/mockkerberos.go generated vendored Normal file

@@ -0,0 +1,123 @@
package sarama

import (
	"encoding/binary"
	"encoding/hex"

	"gopkg.in/jcmturner/gokrb5.v7/credentials"
	"gopkg.in/jcmturner/gokrb5.v7/gssapi"
	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
	"gopkg.in/jcmturner/gokrb5.v7/messages"
	"gopkg.in/jcmturner/gokrb5.v7/types"
)

type KafkaGSSAPIHandler struct {
	client         *MockKerberosClient
	badResponse    bool
	badKeyChecksum bool
}

func (h *KafkaGSSAPIHandler) MockKafkaGSSAPI(buffer []byte) []byte {
	// Default payload used for verification; the mock client constructs its keys at login.
	err := h.client.Login()
	if err != nil {
		return nil
	}
	if h.badResponse { // return garbage
		return []byte{0x00, 0x00, 0x00, 0x01, 0xAD}
	}
	var pack = gssapi.WrapToken{
		Flags:     KRB5_USER_AUTH,
		EC:        12,
		RRC:       0,
		SndSeqNum: 3398292281,
		Payload:   []byte{0x11, 0x00}, // "1100"
	}
	// Compute the checksum
	if h.badKeyChecksum {
		pack.CheckSum = []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
	} else {
		err = pack.SetCheckSum(h.client.ASRep.DecryptedEncPart.Key, keyusage.GSSAPI_ACCEPTOR_SEAL)
		if err != nil {
			return nil
		}
	}
	packBytes, err := pack.Marshal()
	if err != nil {
		return nil
	}
	lenBytes := len(packBytes)
	response := make([]byte, lenBytes+4)
	copy(response[4:], packBytes)
	binary.BigEndian.PutUint32(response, uint32(lenBytes))
	return response
}
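
// The response above uses the same framing Kafka applies to SASL/GSSAPI traffic:
// a 4-byte big-endian length followed by the marshalled gokrb5 wrap token. A
// minimal read-side sketch, assuming that framing and gokrb5 v7's WrapToken API:
//
//	tokenLen := binary.BigEndian.Uint32(response[0:4])
//	var token gssapi.WrapToken
//	err := token.Unmarshal(response[4:4+tokenLen], true) // true: token sent by the acceptor
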
type MockKerberosClient struct {
	asRepBytes  string
	ASRep       messages.ASRep
	credentials *credentials.Credentials
	mockError   error
	errorStage  string
}

func (c *MockKerberosClient) Login() error {
	if c.errorStage == "login" && c.mockError != nil {
		return c.mockError
	}
	// A canned, hex-encoded AS-REP for client@EXAMPLE.COM, decryptable with the password below.
	c.asRepBytes = "6b8202e9308202e5a003020105a10302010ba22b30293027a103020113a220041e301c301aa003020112a1131b114" +
		"558414d504c452e434f4d636c69656e74a30d1b0b4558414d504c452e434f4da4133011a003020101a10a30081b06636c69656e7" +
		"4a5820156618201523082014ea003020105a10d1b0b4558414d504c452e434f4da220301ea003020102a11730151b066b7262746" +
		"7741b0b4558414d504c452e434f4da382011430820110a003020112a103020101a28201020481ffdb9891175d106818e61008c51" +
		"d0b3462bca92f3bf9d4cfa82de4c4d7aff9994ec87c573e3a3d54dcb2bb79618c76f2bf4a3d006f90d5bdbd049bc18f48be39203" +
		"549ca02acaf63f292b12404f9b74c34b83687119d8f56552ccc0c50ebee2a53bb114c1b4619bb1d5d31f0f49b4d40a08a9b4c046" +
		"2e1398d0b648be1c0e50c552ad16e1d8d8e74263dd0bf0ec591e4797dfd40a9a1be4ae830d03a306e053fd7586fef84ffc5e4a83" +
		"7c3122bf3e6a40fe87e84019f6283634461b955712b44a5f7386c278bff94ec2c2dc0403247e29c2450e853471ceababf9b8911f" +
		"997f2e3010b046d2c49eb438afb0f4c210821e80d4ffa4c9521eb895dcd68610b3feaa682012c30820128a003020112a282011f0" +
		"482011bce73cbce3f1dd17661c412005f0f2257c756fe8e98ff97e6ec24b7bab66e5fd3a3827aeeae4757af0c6e892948122d8b2" +
		"03c8df48df0ef5d142d0e416d688f11daa0fcd63d96bdd431d02b8e951c664eeff286a2be62383d274a04016d5f0e141da58cb86" +
		"331de64063062f4f885e8e9ce5b181ca2fdc67897c5995e0ae1ae0c171a64493ff7bd91bc6d89cd4fce1e2b3ea0a10e34b0d5eda" +
		"aa38ee727b50c5632ed1d2f2b457908e616178d0d80b72af209fb8ac9dbaa1768fa45931392b36b6d8c12400f8ded2efaa0654d0" +
		"da1db966e8b5aab4706c800f95d559664646041fdb38b411c62fc0fbe0d25083a28562b0e1c8df16e62e9d5626b0addee489835f" +
		"eedb0f26c05baa596b69b17f47920aa64b29dc77cfcc97ba47885"
	apRepBytes, err := hex.DecodeString(c.asRepBytes)
	if err != nil {
		return err
	}
	err = c.ASRep.Unmarshal(apRepBytes)
	if err != nil {
		return err
	}
	c.credentials = credentials.New("client", "EXAMPLE.COM").WithPassword("qwerty")
	_, err = c.ASRep.DecryptEncPart(c.credentials)
	if err != nil {
		return err
	}
	return nil
}

func (c *MockKerberosClient) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) {
	if c.errorStage == "service_ticket" && c.mockError != nil {
		return messages.Ticket{}, types.EncryptionKey{}, c.mockError
	}
	return c.ASRep.Ticket, c.ASRep.DecryptedEncPart.Key, nil
}

func (c *MockKerberosClient) Domain() string {
	return "EXAMPLE.COM"
}

func (c *MockKerberosClient) CName() types.PrincipalName {
	var p = types.PrincipalName{
		NameType: KRB5_USER_AUTH,
		NameString: []string{
			"kafka",
			"kafka",
		},
	}
	return p
}

func (c *MockKerberosClient) Destroy() {
	// Do nothing.
}
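
Used together, the two mocks above let the test suite exercise the SASL/GSSAPI path without a real KDC. A minimal wiring sketch, assuming MockBroker's SetGSSAPIHandler hook, which lives in an earlier part of mockbroker.go not shown in this diff:

package sarama

import "testing"

// A sketch, not part of the vendored file: route raw GSSAPI bytes (which bypass
// the normal Kafka request framing) to the mock Kerberos handler.
func testKafkaGSSAPISketch(t *testing.T) {
	broker := NewMockBroker(t, 1)
	defer broker.Close()

	handler := &KafkaGSSAPIHandler{client: &MockKerberosClient{}}
	broker.SetGSSAPIHandler(handler.MockKafkaGSSAPI) // assumed hook; defined earlier in mockbroker.go

	// A client configured for SASL/GSSAPI and pointed at broker.Addr() now
	// exchanges wrap tokens with MockKafkaGSSAPI instead of a real KDC.
}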

vendor/github.com/Shopify/sarama/mockresponses.go generated vendored Normal file

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff.