add vendor

parent e85debddfc
commit 5d61468de6
@@ -25,3 +25,5 @@ _testmain.go
coverage.txt
profile.out

simplest-uncommitted-msg-0.1-jar-with-dependencies.jar
@@ -0,0 +1,77 @@
run:
  timeout: 5m
  deadline: 10m

linters-settings:
  govet:
    check-shadowing: false
  golint:
    min-confidence: 0
  gocyclo:
    min-complexity: 99
  maligned:
    suggest-new: true
  dupl:
    threshold: 100
  goconst:
    min-len: 2
    min-occurrences: 3
  misspell:
    locale: US
  goimports:
    local-prefixes: github.com/Shopify/sarama
  gocritic:
    enabled-tags:
      - diagnostic
      - experimental
      - opinionated
      - performance
      - style
    disabled-checks:
      - wrapperFunc
      - ifElseChain
  funlen:
    lines: 300
    statements: 300

linters:
  disable-all: true
  enable:
    - bodyclose
    - deadcode
    - depguard
    - dogsled
    # - dupl
    - errcheck
    - funlen
    # - gocritic
    - gocyclo
    - gofmt
    - goimports
    # - golint
    - gosec
    # - gosimple
    - govet
    # - ineffassign
    - interfacer
    # - misspell
    # - nakedret
    # - scopelint
    # - staticcheck
    - structcheck
    # - stylecheck
    - typecheck
    - unconvert
    - unused
    - varcheck
    - whitespace
    # - goconst
    - gochecknoinits

issues:
  exclude:
    - consider giving a name to these results
    - include an explanation for nolint directive
    - Potential Integer overflow made by strconv.Atoi result conversion to int16/32
    - Use of weak random number generator
    - TLS MinVersion too low
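The `issues.exclude` entries above silence specific linter messages globally; individual findings can also be suppressed inline. A minimal, hedged sketch of the nolint convention golangci-lint expects (hypothetical function, linter choice is only an example tied to the "weak random number generator" exclusion above):

```go
package example

import "math/rand"

// randomJitter deliberately uses math/rand; gosec's weak-RNG warning is
// silenced inline with an explanation, matching the lint policy above.
func randomJitter() int {
	return rand.Intn(100) //nolint:gosec // jitter does not need a cryptographic RNG
}
```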
@@ -1,36 +0,0 @@
language: go
go:
- 1.9.7
- 1.10.4
- 1.11

env:
  global:
  - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
  - TOXIPROXY_ADDR=http://localhost:8474
  - KAFKA_INSTALL_ROOT=/home/travis/kafka
  - KAFKA_HOSTNAME=localhost
  - DEBUG=true
  matrix:
  - KAFKA_VERSION=1.0.0
  - KAFKA_VERSION=1.1.0
  - KAFKA_VERSION=2.0.0

before_install:
- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
- vagrant/install_cluster.sh
- vagrant/boot_cluster.sh
- vagrant/create_topics.sh

install: make install_dependencies

script:
- make test
- make vet
- make errcheck
- if [ "$TRAVIS_GO_VERSION" = "1.11" ]; then make fmt; fi

after_success:
- bash <(curl -s https://codecov.io/bash)

after_script: vagrant/halt_cluster.sh
@ -1,5 +1,407 @@
|
|||
# Changelog
|
||||
|
||||
#### Unreleased
|
||||
|
||||
#### Version 1.27.1 (2020-10-07)
|
||||
|
||||
# Improvements
|
||||
|
||||
#1775 - @d1egoaz - Adds a Producer Interceptor example
|
||||
#1781 - @justin-chen - Refresh brokers given list of seed brokers
|
||||
#1784 - @justin-chen - Add randomize seed broker method
|
||||
#1790 - @d1egoaz - remove example binary
|
||||
#1798 - @bai - Test against Go 1.15
|
||||
#1785 - @justin-chen - Add private method to Client interface to prevent implementation
|
||||
#1802 - @uvw - Support Go 1.13 error unwrapping
|
||||
|
||||
# Fixes
|
||||
|
||||
#1791 - @stanislavkozlovski - bump default version to 1.0.0
|
||||
|
||||
#### Version 1.27.0 (2020-08-11)
|
||||
|
||||
# Improvements
|
||||
|
||||
#1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration
|
||||
#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests
|
||||
#1699 - @wclaeys - Consumer group support for manually committing offsets
|
||||
#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0
|
||||
#1726 - @d1egoaz - Include zstd on the functional tests
|
||||
#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors
|
||||
#1738 - @varun06 - fixed variable names that are named same as some std lib package names
|
||||
#1741 - @varun06 - updated zstd dependency to latest v1.10.10
|
||||
#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base
|
||||
#1763 - @alrs - remove deprecated tls options from test
|
||||
#1769 - @bai - Add support for Kafka 2.6.0
|
||||
|
||||
# Fixes
|
||||
|
||||
#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
|
||||
#1744 - @alrs - Fix isBalanced Function Signature
|
||||
|
||||
#### Version 1.26.4 (2020-05-19)
|
||||
|
||||
# Fixes
|
||||
|
||||
- #1701 - @d1egoaz - Set server name only for the current broker
|
||||
- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka
|
||||
|
||||
#### Version 1.26.3 (2020-05-07)
|
||||
|
||||
# Fixes
|
||||
|
||||
- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config
|
||||
|
||||
#### Version 1.26.2 (2020-05-06)
|
||||
|
||||
# ⚠️ Known Issues
|
||||
|
||||
This release has been marked as not ready for production and may be unstable, please use v1.26.4.
|
||||
|
||||
# Improvements
|
||||
|
||||
- #1560 - @iyacontrol - add sync pool for gzip 1-9
|
||||
- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID
|
||||
- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignments APIs
|
||||
- #1632 - @bai - Add support for Go 1.14
|
||||
- #1640 - @random-dwi - Feature/fix list partition reassignments
|
||||
- #1646 - @mimaison - Add DescribeLogDirs to admin client
|
||||
- #1667 - @bai - Add support for kafka 2.5.0
|
||||
|
||||
# Fixes
|
||||
|
||||
- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0
|
||||
- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine
|
||||
- #1602 - @d1egoaz - adds a note about consumer groups Consume method
|
||||
- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly
|
||||
- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented
|
||||
- #1614 - @alrs - produce_response.go: Remove Unused Functions
|
||||
- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables
|
||||
- #1639 - @agriffaut - Handle errors with no message but error code
|
||||
- #1643 - @kzinglzy - fix `config.net.keepalive`
|
||||
- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs
|
||||
- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata
|
||||
- #1650 - @lavoiesl - Return the response error in heartbeatLoop
|
||||
- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die
|
||||
- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy.
|
||||
|
||||
#### Version 1.26.1 (2020-02-04)
|
||||
|
||||
Improvements:
|
||||
- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539))
|
||||
- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595))
|
||||
- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573))
|
||||
- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592))
|
||||
|
||||
Bug Fixes:
|
||||
- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590))
|
||||
- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589))
|
||||
|
||||
#### Version 1.26.0 (2020-01-24)
|
||||
|
||||
New Features:
|
||||
- Enable zstd compression
|
||||
([1574](https://github.com/Shopify/sarama/pull/1574),
|
||||
[1582](https://github.com/Shopify/sarama/pull/1582))
|
||||
- Support headers in tools kafka-console-producer
|
||||
([1549](https://github.com/Shopify/sarama/pull/1549))
|
||||
|
||||
Improvements:
|
||||
- Add SASL AuthIdentity to SASL frames (authzid)
|
||||
([1585](https://github.com/Shopify/sarama/pull/1585)).
|
||||
|
||||
Bug Fixes:
|
||||
- Sending messages with ZStd compression enabled fails in multiple ways
|
||||
([1252](https://github.com/Shopify/sarama/issues/1252)).
|
||||
- Use the broker for any admin on BrokerConfig
|
||||
([1571](https://github.com/Shopify/sarama/pull/1571)).
|
||||
- Set DescribeConfigRequest Version field
|
||||
([1576](https://github.com/Shopify/sarama/pull/1576)).
|
||||
- ConsumerGroup flooding logs with client/metadata update req
|
||||
([1578](https://github.com/Shopify/sarama/pull/1578)).
|
||||
- MetadataRequest version in DescribeCluster
|
||||
([1580](https://github.com/Shopify/sarama/pull/1580)).
|
||||
- Fix deadlock in consumer group handleError
|
||||
([1581](https://github.com/Shopify/sarama/pull/1581))
|
||||
- Fill in the Fetch{Request,Response} protocol
|
||||
([1582](https://github.com/Shopify/sarama/pull/1582)).
|
||||
- Retry topic request on ControllerNotAvailable
|
||||
([1586](https://github.com/Shopify/sarama/pull/1586)).
|
||||
|
||||
#### Version 1.25.0 (2020-01-13)
|
||||
|
||||
New Features:
|
||||
- Support TLS protocol in kafka-producer-performance
|
||||
([1538](https://github.com/Shopify/sarama/pull/1538)).
|
||||
- Add support for kafka 2.4.0
|
||||
([1552](https://github.com/Shopify/sarama/pull/1552)).
|
||||
|
||||
Improvements:
|
||||
- Allow the Consumer to disable auto-commit offsets
|
||||
([1164](https://github.com/Shopify/sarama/pull/1164)).
|
||||
- Produce records with consistent timestamps
|
||||
([1455](https://github.com/Shopify/sarama/pull/1455)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix incorrect SetTopicMetadata name mentions
|
||||
([1534](https://github.com/Shopify/sarama/pull/1534)).
|
||||
- Fix client.tryRefreshMetadata Println
|
||||
([1535](https://github.com/Shopify/sarama/pull/1535)).
|
||||
- Fix panic on calling updateMetadata on closed client
|
||||
([1531](https://github.com/Shopify/sarama/pull/1531)).
|
||||
- Fix possible faulty metrics in TestFuncProducing
|
||||
([1545](https://github.com/Shopify/sarama/pull/1545)).
|
||||
|
||||
#### Version 1.24.1 (2019-10-31)
|
||||
|
||||
New Features:
|
||||
- Add DescribeLogDirs Request/Response pair
|
||||
([1520](https://github.com/Shopify/sarama/pull/1520)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix ClusterAdmin returning invalid controller ID on DescribeCluster
|
||||
([1518](https://github.com/Shopify/sarama/pull/1518)).
|
||||
- Fix issue with consumergroup not rebalancing when new partition is added
|
||||
([1525](https://github.com/Shopify/sarama/pull/1525)).
|
||||
- Ensure consistent use of read/write deadlines
|
||||
([1529](https://github.com/Shopify/sarama/pull/1529)).
|
||||
|
||||
#### Version 1.24.0 (2019-10-09)
|
||||
|
||||
New Features:
|
||||
- Add sticky partition assignor
|
||||
([1416](https://github.com/Shopify/sarama/pull/1416)).
|
||||
- Switch from cgo zstd package to pure Go implementation
|
||||
([1477](https://github.com/Shopify/sarama/pull/1477)).
|
||||
|
||||
Improvements:
|
||||
- Allow creating ClusterAdmin from client
|
||||
([1415](https://github.com/Shopify/sarama/pull/1415)).
|
||||
- Set KafkaVersion in ListAcls method
|
||||
([1452](https://github.com/Shopify/sarama/pull/1452)).
|
||||
- Set request version in CreateACL ClusterAdmin method
|
||||
([1458](https://github.com/Shopify/sarama/pull/1458)).
|
||||
- Set request version in DeleteACL ClusterAdmin method
|
||||
([1461](https://github.com/Shopify/sarama/pull/1461)).
|
||||
- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest
|
||||
([1464](https://github.com/Shopify/sarama/pull/1464)).
|
||||
- Remove direct usage of gofork
|
||||
([1465](https://github.com/Shopify/sarama/pull/1465)).
|
||||
- Add support for Go 1.13
|
||||
([1478](https://github.com/Shopify/sarama/pull/1478)).
|
||||
- Improve behavior of NewMockListAclsResponse
|
||||
([1481](https://github.com/Shopify/sarama/pull/1481)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix race condition in consumergroup example
|
||||
([1434](https://github.com/Shopify/sarama/pull/1434)).
|
||||
- Fix brokerProducer goroutine leak
|
||||
([1442](https://github.com/Shopify/sarama/pull/1442)).
|
||||
- Use released version of lz4 library
|
||||
([1469](https://github.com/Shopify/sarama/pull/1469)).
|
||||
- Set correct version in MockDeleteTopicsResponse
|
||||
([1484](https://github.com/Shopify/sarama/pull/1484)).
|
||||
- Fix CLI help message typo
|
||||
([1494](https://github.com/Shopify/sarama/pull/1494)).
|
||||
|
||||
Known Issues:
|
||||
- Please **don't** use Zstd, as it doesn't work right now.
|
||||
See https://github.com/Shopify/sarama/issues/1252
|
||||
|
||||
#### Version 1.23.1 (2019-07-22)
|
||||
|
||||
Bug Fixes:
|
||||
- Fix fetch delete bug record
|
||||
([1425](https://github.com/Shopify/sarama/pull/1425)).
|
||||
- Handle SASL/OAUTHBEARER token rejection
|
||||
([1428](https://github.com/Shopify/sarama/pull/1428)).
|
||||
|
||||
#### Version 1.23.0 (2019-07-02)
|
||||
|
||||
New Features:
|
||||
- Add support for Kafka 2.3.0
|
||||
([1418](https://github.com/Shopify/sarama/pull/1418)).
|
||||
- Add support for ListConsumerGroupOffsets v2
|
||||
([1374](https://github.com/Shopify/sarama/pull/1374)).
|
||||
- Add support for DeleteConsumerGroup
|
||||
([1417](https://github.com/Shopify/sarama/pull/1417)).
|
||||
- Add support for SASLVersion configuration
|
||||
([1410](https://github.com/Shopify/sarama/pull/1410)).
|
||||
- Add kerberos support
|
||||
([1366](https://github.com/Shopify/sarama/pull/1366)).
|
||||
|
||||
Improvements:
|
||||
- Improve sasl_scram_client example
|
||||
([1406](https://github.com/Shopify/sarama/pull/1406)).
|
||||
- Fix shutdown and race-condition in consumer-group example
|
||||
([1404](https://github.com/Shopify/sarama/pull/1404)).
|
||||
- Add support for error codes 77—81
|
||||
([1397](https://github.com/Shopify/sarama/pull/1397)).
|
||||
- Pool internal objects allocated per message
|
||||
([1385](https://github.com/Shopify/sarama/pull/1385)).
|
||||
- Reduce packet decoder allocations
|
||||
([1373](https://github.com/Shopify/sarama/pull/1373)).
|
||||
- Support timeout when fetching metadata
|
||||
([1359](https://github.com/Shopify/sarama/pull/1359)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix fetch size integer overflow
|
||||
([1376](https://github.com/Shopify/sarama/pull/1376)).
|
||||
- Handle and log throttled FetchResponses
|
||||
([1383](https://github.com/Shopify/sarama/pull/1383)).
|
||||
- Refactor misspelled word Resouce to Resource
|
||||
([1368](https://github.com/Shopify/sarama/pull/1368)).
|
||||
|
||||
#### Version 1.22.1 (2019-04-29)
|
||||
|
||||
Improvements:
|
||||
- Use zstd 1.3.8
|
||||
([1350](https://github.com/Shopify/sarama/pull/1350)).
|
||||
- Add support for SaslHandshakeRequest v1
|
||||
([1354](https://github.com/Shopify/sarama/pull/1354)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix V5 MetadataRequest nullable topics array
|
||||
([1353](https://github.com/Shopify/sarama/pull/1353)).
|
||||
- Use a different SCRAM client for each broker connection
|
||||
([1349](https://github.com/Shopify/sarama/pull/1349)).
|
||||
- Fix AllowAutoTopicCreation for MetadataRequest greater than v3
|
||||
([1344](https://github.com/Shopify/sarama/pull/1344)).
|
||||
|
||||
#### Version 1.22.0 (2019-04-09)
|
||||
|
||||
New Features:
|
||||
- Add Offline Replicas Operation to Client
|
||||
([1318](https://github.com/Shopify/sarama/pull/1318)).
|
||||
- Allow using proxy when connecting to broker
|
||||
([1326](https://github.com/Shopify/sarama/pull/1326)).
|
||||
- Implement ReadCommitted
|
||||
([1307](https://github.com/Shopify/sarama/pull/1307)).
|
||||
- Add support for Kafka 2.2.0
|
||||
([1331](https://github.com/Shopify/sarama/pull/1331)).
|
||||
- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms
|
||||
([1331](https://github.com/Shopify/sarama/pull/1295)).
|
||||
|
||||
Improvements:
|
||||
- Unregister all broker metrics on broker stop
|
||||
([1232](https://github.com/Shopify/sarama/pull/1232)).
|
||||
- Add SCRAM authentication example
|
||||
([1303](https://github.com/Shopify/sarama/pull/1303)).
|
||||
- Add consumergroup examples
|
||||
([1304](https://github.com/Shopify/sarama/pull/1304)).
|
||||
- Expose consumer batch size metric
|
||||
([1296](https://github.com/Shopify/sarama/pull/1296)).
|
||||
- Add TLS options to console producer and consumer
|
||||
([1300](https://github.com/Shopify/sarama/pull/1300)).
|
||||
- Reduce client close bookkeeping
|
||||
([1297](https://github.com/Shopify/sarama/pull/1297)).
|
||||
- Satisfy error interface in create responses
|
||||
([1154](https://github.com/Shopify/sarama/pull/1154)).
|
||||
- Please lint gods
|
||||
([1346](https://github.com/Shopify/sarama/pull/1346)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix multi consumer group instance crash
|
||||
([1338](https://github.com/Shopify/sarama/pull/1338)).
|
||||
- Update lz4 to latest version
|
||||
([1347](https://github.com/Shopify/sarama/pull/1347)).
|
||||
- Retry ErrNotCoordinatorForConsumer in new consumergroup session
|
||||
([1231](https://github.com/Shopify/sarama/pull/1231)).
|
||||
- Fix cleanup error handler
|
||||
([1332](https://github.com/Shopify/sarama/pull/1332)).
|
||||
- Fix race condition in PartitionConsumer
|
||||
([1156](https://github.com/Shopify/sarama/pull/1156)).
|
||||
|
||||
#### Version 1.21.0 (2019-02-24)
|
||||
|
||||
New Features:
|
||||
- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest
|
||||
([1236](https://github.com/Shopify/sarama/pull/1236)).
|
||||
- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests
|
||||
([1178](https://github.com/Shopify/sarama/pull/1178)).
|
||||
- Implement SASL/OAUTHBEARER
|
||||
([1240](https://github.com/Shopify/sarama/pull/1240)).
|
||||
|
||||
Improvements:
|
||||
- Add Go mod support
|
||||
([1282](https://github.com/Shopify/sarama/pull/1282)).
|
||||
- Add error codes 73—76
|
||||
([1239](https://github.com/Shopify/sarama/pull/1239)).
|
||||
- Add retry backoff function
|
||||
([1160](https://github.com/Shopify/sarama/pull/1160)).
|
||||
- Maintain metadata in the producer even when retries are disabled
|
||||
([1189](https://github.com/Shopify/sarama/pull/1189)).
|
||||
- Include ReplicaAssignment in ListTopics
|
||||
([1274](https://github.com/Shopify/sarama/pull/1274)).
|
||||
- Add producer performance tool
|
||||
([1222](https://github.com/Shopify/sarama/pull/1222)).
|
||||
- Add support LogAppend timestamps
|
||||
([1258](https://github.com/Shopify/sarama/pull/1258)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix potential deadlock when a heartbeat request fails
|
||||
([1286](https://github.com/Shopify/sarama/pull/1286)).
|
||||
- Fix consuming compacted topic
|
||||
([1227](https://github.com/Shopify/sarama/pull/1227)).
|
||||
- Set correct Kafka version for DescribeConfigsRequest v1
|
||||
([1277](https://github.com/Shopify/sarama/pull/1277)).
|
||||
- Update kafka test version
|
||||
([1273](https://github.com/Shopify/sarama/pull/1273)).
|
||||
|
||||
#### Version 1.20.1 (2019-01-10)
|
||||
|
||||
New Features:
|
||||
- Add optional replica id in offset request
|
||||
([1100](https://github.com/Shopify/sarama/pull/1100)).
|
||||
|
||||
Improvements:
|
||||
- Implement DescribeConfigs Request + Response v1 & v2
|
||||
([1230](https://github.com/Shopify/sarama/pull/1230)).
|
||||
- Reuse compression objects
|
||||
([1185](https://github.com/Shopify/sarama/pull/1185)).
|
||||
- Switch from png to svg for GoDoc link in README
|
||||
([1243](https://github.com/Shopify/sarama/pull/1243)).
|
||||
- Fix typo in deprecation notice for FetchResponseBlock.Records
|
||||
([1242](https://github.com/Shopify/sarama/pull/1242)).
|
||||
- Fix typos in consumer metadata response file
|
||||
([1244](https://github.com/Shopify/sarama/pull/1244)).
|
||||
|
||||
Bug Fixes:
|
||||
- Revert to individual msg retries for non-idempotent
|
||||
([1203](https://github.com/Shopify/sarama/pull/1203)).
|
||||
- Respect MaxMessageBytes limit for uncompressed messages
|
||||
([1141](https://github.com/Shopify/sarama/pull/1141)).
|
||||
|
||||
#### Version 1.20.0 (2018-12-10)
|
||||
|
||||
New Features:
|
||||
- Add support for zstd compression
|
||||
([#1170](https://github.com/Shopify/sarama/pull/1170)).
|
||||
- Add support for Idempotent Producer
|
||||
([#1152](https://github.com/Shopify/sarama/pull/1152)).
|
||||
- Add support for Kafka 2.1.0
|
||||
([#1229](https://github.com/Shopify/sarama/pull/1229)).
|
||||
- Add support for OffsetCommit request/response pairs versions v1 to v5
|
||||
([#1201](https://github.com/Shopify/sarama/pull/1201)).
|
||||
- Add support for OffsetFetch request/response pair up to version v5
|
||||
([#1198](https://github.com/Shopify/sarama/pull/1198)).
|
||||
|
||||
Improvements:
|
||||
- Export broker's Rack setting
|
||||
([#1173](https://github.com/Shopify/sarama/pull/1173)).
|
||||
- Always use latest patch version of Go on CI
|
||||
([#1202](https://github.com/Shopify/sarama/pull/1202)).
|
||||
- Add error codes 61 to 72
|
||||
([#1195](https://github.com/Shopify/sarama/pull/1195)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix build without cgo
|
||||
([#1182](https://github.com/Shopify/sarama/pull/1182)).
|
||||
- Fix go vet suggestion in consumer group file
|
||||
([#1209](https://github.com/Shopify/sarama/pull/1209)).
|
||||
- Fix typos in code and comments
|
||||
([#1228](https://github.com/Shopify/sarama/pull/1228)).
|
||||
|
||||
#### Version 1.19.0 (2018-09-27)
|
||||
|
||||
New Features:
|
||||
|
|
|
@@ -1,30 +1,31 @@
default: fmt vet errcheck test
default: fmt get update test lint

# Taken from https://github.com/codecov/example-go#caveat-multiple-files
test:
	echo "" > coverage.txt
	for d in `go list ./... | grep -v vendor`; do \
	  go test -p 1 -v -timeout 240s -race -coverprofile=profile.out -covermode=atomic $$d || exit 1; \
	  if [ -f profile.out ]; then \
	    cat profile.out >> coverage.txt; \
	    rm profile.out; \
	  fi \
	done
GO := GO111MODULE=on GOPRIVATE=github.com/linkedin GOSUMDB=off go
GOBUILD := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG)
GOTEST := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 6m -coverprofile=profile.out -covermode=atomic

vet:
	go vet ./...

# See https://github.com/kisielk/errcheck/pull/141 for details on ignorepkg
errcheck:
	errcheck -ignorepkg fmt github.com/Shopify/sarama/...

fmt:
	@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi

install_dependencies: install_errcheck get

install_errcheck:
	go get github.com/kisielk/errcheck
FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go')
TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go')

get:
	go get -t
	$(GO) get ./...
	$(GO) mod verify
	$(GO) mod tidy

update:
	$(GO) get -u -v all
	$(GO) mod verify
	$(GO) mod tidy

fmt:
	gofmt -s -l -w $(FILES) $(TESTS)

lint:
	GOFLAGS="-tags=functional" golangci-lint run

test:
	$(GOTEST) ./...

.PHONY: test_functional
test_functional:
	$(GOTEST) -tags=functional ./...
@@ -1,13 +1,12 @@
sarama
======
# sarama

[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama)
[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.svg)](https://godoc.org/github.com/Shopify/sarama)
[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama)

Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).

### Getting started
## Getting started

- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
- Mocks for testing are available in the [mocks](./mocks) subpackage.
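To complement the godoc pointer above, a minimal, hedged sketch of sending one message with a `SyncProducer`; the broker address and topic name are placeholders:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true // required by SyncProducer

	// "localhost:9092" and "example-topic" are placeholders.
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalln(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello, sarama"),
	})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("stored message at partition %d, offset %d", partition, offset)
}
```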
@@ -16,24 +15,22 @@ Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apa

You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).

### Compatibility and API stability
## Compatibility and API stability

Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
the two latest stable releases of Kafka and Go, and we provide a two month
grace period for older releases. This means we currently officially support
Go 1.8 through 1.11, and Kafka 1.0 through 2.0, although older releases are
Go 1.13 through 1.14, and Kafka 2.4 through 2.6, although older releases are
still likely to work.

Sarama follows semantic versioning and provides API stability via the gopkg.in service.
You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
A changelog is available [here](CHANGELOG.md).

### Contributing
## Contributing

* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
  technical and design details.
* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
  contains a wealth of useful information.
* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
* If you have any questions, just ask!
- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details.
- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information.
- For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
- If you have any questions, just ask!
@@ -1,14 +1,8 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
MEMORY = 3072

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "ubuntu/trusty64"
Vagrant.configure("2") do |config|
  config.vm.box = "ubuntu/bionic64"

  config.vm.provision :shell, path: "vagrant/provision.sh"
@@ -1,17 +1,27 @@
package sarama

//Resource holds information about acl resource type
type Resource struct {
	ResourceType AclResourceType
	ResourceName string
	ResourceType        AclResourceType
	ResourceName        string
	ResourcePatternType AclResourcePatternType
}

func (r *Resource) encode(pe packetEncoder) error {
func (r *Resource) encode(pe packetEncoder, version int16) error {
	pe.putInt8(int8(r.ResourceType))

	if err := pe.putString(r.ResourceName); err != nil {
		return err
	}

	if version == 1 {
		if r.ResourcePatternType == AclPatternUnknown {
			Logger.Print("Cannot encode an unknown resource pattern type, using Literal instead")
			r.ResourcePatternType = AclPatternLiteral
		}
		pe.putInt8(int8(r.ResourcePatternType))
	}

	return nil
}

@@ -25,10 +35,18 @@ func (r *Resource) decode(pd packetDecoder, version int16) (err error) {
	if r.ResourceName, err = pd.getString(); err != nil {
		return err
	}
	if version == 1 {
		pattern, err := pd.getInt8()
		if err != nil {
			return err
		}
		r.ResourcePatternType = AclResourcePatternType(pattern)
	}

	return nil
}

//Acl holds information about acl type
type Acl struct {
	Principal string
	Host      string
@@ -75,13 +93,14 @@ func (a *Acl) decode(pd packetDecoder, version int16) (err error) {
	return nil
}

//ResourceAcls is an acl resource type
type ResourceAcls struct {
	Resource
	Acls []*Acl
}

func (r *ResourceAcls) encode(pe packetEncoder) error {
	if err := r.Resource.encode(pe); err != nil {
func (r *ResourceAcls) encode(pe packetEncoder, version int16) error {
	if err := r.Resource.encode(pe, version); err != nil {
		return err
	}

@ -1,6 +1,8 @@
|
|||
package sarama
|
||||
|
||||
//CreateAclsRequest is an acl creation request
|
||||
type CreateAclsRequest struct {
|
||||
Version int16
|
||||
AclCreations []*AclCreation
|
||||
}
|
||||
|
||||
|
@ -10,7 +12,7 @@ func (c *CreateAclsRequest) encode(pe packetEncoder) error {
|
|||
}
|
||||
|
||||
for _, aclCreation := range c.AclCreations {
|
||||
if err := aclCreation.encode(pe); err != nil {
|
||||
if err := aclCreation.encode(pe, c.Version); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -19,6 +21,7 @@ func (c *CreateAclsRequest) encode(pe packetEncoder) error {
|
|||
}
|
||||
|
||||
func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
c.Version = version
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -36,25 +39,35 @@ func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error)
|
|||
return nil
|
||||
}
|
||||
|
||||
func (d *CreateAclsRequest) key() int16 {
|
||||
func (c *CreateAclsRequest) key() int16 {
|
||||
return 30
|
||||
}
|
||||
|
||||
func (d *CreateAclsRequest) version() int16 {
|
||||
return 0
|
||||
func (c *CreateAclsRequest) version() int16 {
|
||||
return c.Version
|
||||
}
|
||||
|
||||
func (d *CreateAclsRequest) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
func (c *CreateAclsRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (c *CreateAclsRequest) requiredVersion() KafkaVersion {
|
||||
switch c.Version {
|
||||
case 1:
|
||||
return V2_0_0_0
|
||||
default:
|
||||
return V0_11_0_0
|
||||
}
|
||||
}
|
||||
|
||||
//AclCreation is a wrapper around Resource and Acl type
|
||||
type AclCreation struct {
|
||||
Resource
|
||||
Acl
|
||||
}
|
||||
|
||||
func (a *AclCreation) encode(pe packetEncoder) error {
|
||||
if err := a.Resource.encode(pe); err != nil {
|
||||
func (a *AclCreation) encode(pe packetEncoder, version int16) error {
|
||||
if err := a.Resource.encode(pe, version); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := a.Acl.encode(pe); err != nil {
|
||||
|
|
|
@ -2,6 +2,7 @@ package sarama
|
|||
|
||||
import "time"
|
||||
|
||||
//CreateAclsResponse is an acl creation response type
|
||||
type CreateAclsResponse struct {
|
||||
ThrottleTime time.Duration
|
||||
AclCreationResponses []*AclCreationResponse
|
||||
|
@ -46,18 +47,23 @@ func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error)
|
|||
return nil
|
||||
}
|
||||
|
||||
func (d *CreateAclsResponse) key() int16 {
|
||||
func (c *CreateAclsResponse) key() int16 {
|
||||
return 30
|
||||
}
|
||||
|
||||
func (d *CreateAclsResponse) version() int16 {
|
||||
func (c *CreateAclsResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (d *CreateAclsResponse) requiredVersion() KafkaVersion {
|
||||
func (c *CreateAclsResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (c *CreateAclsResponse) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
}
|
||||
|
||||
//AclCreationResponse is an acl creation response type
|
||||
type AclCreationResponse struct {
|
||||
Err KError
|
||||
ErrMsg *string
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
package sarama
|
||||
|
||||
//DeleteAclsRequest is a delete acl request
|
||||
type DeleteAclsRequest struct {
|
||||
Version int
|
||||
Filters []*AclFilter
|
||||
}
|
||||
|
||||
|
@ -10,6 +12,7 @@ func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
|
|||
}
|
||||
|
||||
for _, filter := range d.Filters {
|
||||
filter.Version = d.Version
|
||||
if err := filter.encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -19,6 +22,7 @@ func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
|
|||
}
|
||||
|
||||
func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
d.Version = int(version)
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -27,6 +31,7 @@ func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error)
|
|||
d.Filters = make([]*AclFilter, n)
|
||||
for i := 0; i < n; i++ {
|
||||
d.Filters[i] = new(AclFilter)
|
||||
d.Filters[i].Version = int(version)
|
||||
if err := d.Filters[i].decode(pd, version); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -40,9 +45,18 @@ func (d *DeleteAclsRequest) key() int16 {
|
|||
}
|
||||
|
||||
func (d *DeleteAclsRequest) version() int16 {
|
||||
return 0
|
||||
return int16(d.Version)
|
||||
}
|
||||
|
||||
func (c *DeleteAclsRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
switch d.Version {
|
||||
case 1:
|
||||
return V2_0_0_0
|
||||
default:
|
||||
return V0_11_0_0
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,20 +2,22 @@ package sarama
|
|||
|
||||
import "time"
|
||||
|
||||
//DeleteAclsResponse is a delete acl response
|
||||
type DeleteAclsResponse struct {
|
||||
Version int16
|
||||
ThrottleTime time.Duration
|
||||
FilterResponses []*FilterResponse
|
||||
}
|
||||
|
||||
func (a *DeleteAclsResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
|
||||
func (d *DeleteAclsResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
|
||||
|
||||
if err := pe.putArrayLength(len(a.FilterResponses)); err != nil {
|
||||
if err := pe.putArrayLength(len(d.FilterResponses)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, filterResponse := range a.FilterResponses {
|
||||
if err := filterResponse.encode(pe); err != nil {
|
||||
for _, filterResponse := range d.FilterResponses {
|
||||
if err := filterResponse.encode(pe, d.Version); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -23,22 +25,22 @@ func (a *DeleteAclsResponse) encode(pe packetEncoder) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (a *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
throttleTime, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
|
||||
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
|
||||
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.FilterResponses = make([]*FilterResponse, n)
|
||||
d.FilterResponses = make([]*FilterResponse, n)
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
a.FilterResponses[i] = new(FilterResponse)
|
||||
if err := a.FilterResponses[i].decode(pd, version); err != nil {
|
||||
d.FilterResponses[i] = new(FilterResponse)
|
||||
if err := d.FilterResponses[i].decode(pd, version); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -51,6 +53,10 @@ func (d *DeleteAclsResponse) key() int16 {
|
|||
}
|
||||
|
||||
func (d *DeleteAclsResponse) version() int16 {
|
||||
return d.Version
|
||||
}
|
||||
|
||||
func (d *DeleteAclsResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
|
@ -58,13 +64,14 @@ func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
|
|||
return V0_11_0_0
|
||||
}
|
||||
|
||||
//FilterResponse is a filter response type
|
||||
type FilterResponse struct {
|
||||
Err KError
|
||||
ErrMsg *string
|
||||
MatchingAcls []*MatchingAcl
|
||||
}
|
||||
|
||||
func (f *FilterResponse) encode(pe packetEncoder) error {
|
||||
func (f *FilterResponse) encode(pe packetEncoder, version int16) error {
|
||||
pe.putInt16(int16(f.Err))
|
||||
if err := pe.putNullableString(f.ErrMsg); err != nil {
|
||||
return err
|
||||
|
@ -74,7 +81,7 @@ func (f *FilterResponse) encode(pe packetEncoder) error {
|
|||
return err
|
||||
}
|
||||
for _, matchingAcl := range f.MatchingAcls {
|
||||
if err := matchingAcl.encode(pe); err != nil {
|
||||
if err := matchingAcl.encode(pe, version); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -108,6 +115,7 @@ func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
|
|||
return nil
|
||||
}
|
||||
|
||||
//MatchingAcl is a matching acl type
|
||||
type MatchingAcl struct {
|
||||
Err KError
|
||||
ErrMsg *string
|
||||
|
@ -115,13 +123,13 @@ type MatchingAcl struct {
|
|||
Acl
|
||||
}
|
||||
|
||||
func (m *MatchingAcl) encode(pe packetEncoder) error {
|
||||
func (m *MatchingAcl) encode(pe packetEncoder, version int16) error {
|
||||
pe.putInt16(int16(m.Err))
|
||||
if err := pe.putNullableString(m.ErrMsg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := m.Resource.encode(pe); err != nil {
|
||||
if err := m.Resource.encode(pe, version); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
@ -1,14 +1,19 @@
|
|||
package sarama
|
||||
|
||||
//DescribeAclsRequest is a describe acl request type
|
||||
type DescribeAclsRequest struct {
|
||||
Version int
|
||||
AclFilter
|
||||
}
|
||||
|
||||
func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
|
||||
d.AclFilter.Version = d.Version
|
||||
return d.AclFilter.encode(pe)
|
||||
}
|
||||
|
||||
func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
d.Version = int(version)
|
||||
d.AclFilter.Version = int(version)
|
||||
return d.AclFilter.decode(pd, version)
|
||||
}
|
||||
|
||||
|
@ -17,9 +22,18 @@ func (d *DescribeAclsRequest) key() int16 {
|
|||
}
|
||||
|
||||
func (d *DescribeAclsRequest) version() int16 {
|
||||
return 0
|
||||
return int16(d.Version)
|
||||
}
|
||||
|
||||
func (d *DescribeAclsRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
switch d.Version {
|
||||
case 1:
|
||||
return V2_0_0_0
|
||||
default:
|
||||
return V0_11_0_0
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,7 +2,9 @@ package sarama
|
|||
|
||||
import "time"
|
||||
|
||||
//DescribeAclsResponse is a describe acl response type
|
||||
type DescribeAclsResponse struct {
|
||||
Version int16
|
||||
ThrottleTime time.Duration
|
||||
Err KError
|
||||
ErrMsg *string
|
||||
|
@ -22,7 +24,7 @@ func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
|
|||
}
|
||||
|
||||
for _, resourceAcl := range d.ResourceAcls {
|
||||
if err := resourceAcl.encode(pe); err != nil {
|
||||
if err := resourceAcl.encode(pe, d.Version); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -72,9 +74,18 @@ func (d *DescribeAclsResponse) key() int16 {
|
|||
}
|
||||
|
||||
func (d *DescribeAclsResponse) version() int16 {
|
||||
return d.Version
|
||||
}
|
||||
|
||||
func (d *DescribeAclsResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
switch d.Version {
|
||||
case 1:
|
||||
return V2_0_0_0
|
||||
default:
|
||||
return V0_11_0_0
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,12 +1,14 @@
|
|||
package sarama
|
||||
|
||||
type AclFilter struct {
|
||||
ResourceType AclResourceType
|
||||
ResourceName *string
|
||||
Principal *string
|
||||
Host *string
|
||||
Operation AclOperation
|
||||
PermissionType AclPermissionType
|
||||
Version int
|
||||
ResourceType AclResourceType
|
||||
ResourceName *string
|
||||
ResourcePatternTypeFilter AclResourcePatternType
|
||||
Principal *string
|
||||
Host *string
|
||||
Operation AclOperation
|
||||
PermissionType AclPermissionType
|
||||
}
|
||||
|
||||
func (a *AclFilter) encode(pe packetEncoder) error {
|
||||
|
@ -14,6 +16,11 @@ func (a *AclFilter) encode(pe packetEncoder) error {
|
|||
if err := pe.putNullableString(a.ResourceName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if a.Version == 1 {
|
||||
pe.putInt8(int8(a.ResourcePatternTypeFilter))
|
||||
}
|
||||
|
||||
if err := pe.putNullableString(a.Principal); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -37,6 +44,16 @@ func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) {
|
|||
return err
|
||||
}
|
||||
|
||||
if a.Version == 1 {
|
||||
pattern, err := pd.getInt8()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a.ResourcePatternTypeFilter = AclResourcePatternType(pattern)
|
||||
}
|
||||
|
||||
if a.Principal, err = pd.getNullableString(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
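The ClusterAdmin methods that take an AclFilter (ListAcls, DeleteACL) use exactly the fields above, including the new ResourcePatternTypeFilter. A hedged sketch of listing the ACLs bound to one literal topic name; the admin instance and topic are assumptions, and the pattern-type filter is only sent on the wire when Config.Version is at least V2_0_0_0:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

// listTopicAcls prints the ACLs attached to a single literal topic name.
// admin and topic are caller-supplied; values here are illustrative only.
func listTopicAcls(admin sarama.ClusterAdmin, topic string) error {
	filter := sarama.AclFilter{
		ResourceType:              sarama.AclResourceTopic,
		ResourceName:              &topic,
		ResourcePatternTypeFilter: sarama.AclPatternLiteral,
		Operation:                 sarama.AclOperationAny,
		PermissionType:            sarama.AclPermissionAny,
	}

	resourceAcls, err := admin.ListAcls(filter)
	if err != nil {
		return err
	}
	for _, ra := range resourceAcls {
		log.Printf("%s: %d acl(s)", ra.ResourceName, len(ra.Acls))
	}
	return nil
}
```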
@@ -1,42 +1,55 @@
package sarama

type AclOperation int
type (
	AclOperation int

	AclPermissionType int

	AclResourceType int

	AclResourcePatternType int
)

// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
const (
	AclOperationUnknown AclOperation = 0
	AclOperationAny AclOperation = 1
	AclOperationAll AclOperation = 2
	AclOperationRead AclOperation = 3
	AclOperationWrite AclOperation = 4
	AclOperationCreate AclOperation = 5
	AclOperationDelete AclOperation = 6
	AclOperationAlter AclOperation = 7
	AclOperationDescribe AclOperation = 8
	AclOperationClusterAction AclOperation = 9
	AclOperationDescribeConfigs AclOperation = 10
	AclOperationAlterConfigs AclOperation = 11
	AclOperationIdempotentWrite AclOperation = 12
	AclOperationUnknown AclOperation = iota
	AclOperationAny
	AclOperationAll
	AclOperationRead
	AclOperationWrite
	AclOperationCreate
	AclOperationDelete
	AclOperationAlter
	AclOperationDescribe
	AclOperationClusterAction
	AclOperationDescribeConfigs
	AclOperationAlterConfigs
	AclOperationIdempotentWrite
)

type AclPermissionType int

// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
const (
	AclPermissionUnknown AclPermissionType = 0
	AclPermissionAny AclPermissionType = 1
	AclPermissionDeny AclPermissionType = 2
	AclPermissionAllow AclPermissionType = 3
	AclPermissionUnknown AclPermissionType = iota
	AclPermissionAny
	AclPermissionDeny
	AclPermissionAllow
)

type AclResourceType int

// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
const (
	AclResourceUnknown AclResourceType = 0
	AclResourceAny AclResourceType = 1
	AclResourceTopic AclResourceType = 2
	AclResourceGroup AclResourceType = 3
	AclResourceCluster AclResourceType = 4
	AclResourceTransactionalID AclResourceType = 5
	AclResourceUnknown AclResourceType = iota
	AclResourceAny
	AclResourceTopic
	AclResourceGroup
	AclResourceCluster
	AclResourceTransactionalID
)

// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
const (
	AclPatternUnknown AclResourcePatternType = iota
	AclPatternAny
	AclPatternMatch
	AclPatternLiteral
	AclPatternPrefixed
)
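As a usage illustration of these constants, a hedged sketch that grants a principal read access to a topic through ClusterAdmin.CreateACL; the admin instance, principal, and topic name are placeholders, not part of this change:

```go
package main

import "github.com/Shopify/sarama"

// grantRead creates a literal-pattern, allow-read ACL for one principal on
// one topic, using the Resource and Acl types shown earlier in this change.
func grantRead(admin sarama.ClusterAdmin, topic, principal string) error {
	resource := sarama.Resource{
		ResourceType:        sarama.AclResourceTopic,
		ResourceName:        topic,
		ResourcePatternType: sarama.AclPatternLiteral,
	}
	acl := sarama.Acl{
		Principal:      principal, // e.g. "User:alice" (placeholder)
		Host:           "*",
		Operation:      sarama.AclOperationRead,
		PermissionType: sarama.AclPermissionAllow,
	}
	return admin.CreateACL(resource, acl)
}
```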
@ -1,5 +1,6 @@
|
|||
package sarama
|
||||
|
||||
//AddOffsetsToTxnRequest adds offsets to a transaction request
|
||||
type AddOffsetsToTxnRequest struct {
|
||||
TransactionalID string
|
||||
ProducerID int64
|
||||
|
@ -47,6 +48,10 @@ func (a *AddOffsetsToTxnRequest) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (a *AddOffsetsToTxnRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
}
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
//AddOffsetsToTxnResponse is a response type for adding offsets to txns
|
||||
type AddOffsetsToTxnResponse struct {
|
||||
ThrottleTime time.Duration
|
||||
Err KError
|
||||
|
@ -39,6 +40,10 @@ func (a *AddOffsetsToTxnResponse) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (a *AddOffsetsToTxnResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package sarama
|
||||
|
||||
//AddPartitionsToTxnRequest is an add partitions to txn request
|
||||
type AddPartitionsToTxnRequest struct {
|
||||
TransactionalID string
|
||||
ProducerID int64
|
||||
|
@ -71,6 +72,10 @@ func (a *AddPartitionsToTxnRequest) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (a *AddPartitionsToTxnRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
}
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
//AddPartitionsToTxnResponse is a partition errors to transaction type
|
||||
type AddPartitionsToTxnResponse struct {
|
||||
ThrottleTime time.Duration
|
||||
Errors map[string][]*PartitionError
|
||||
|
@ -78,10 +79,15 @@ func (a *AddPartitionsToTxnResponse) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (a *AddPartitionsToTxnResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
}
|
||||
|
||||
//PartitionError is a partition error type
|
||||
type PartitionError struct {
|
||||
Partition int32
|
||||
Err KError
|
||||
|
|
|
@@ -1,6 +1,13 @@
package sarama

import "errors"
import (
	"errors"
	"fmt"
	"math/rand"
	"strconv"
	"sync"
	"time"
)

// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0.
@@ -13,6 +20,12 @@ type ClusterAdmin interface {
	// may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0.
	CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error

	// List the topics available in the cluster with the default options.
	ListTopics() (map[string]TopicDetail, error)

	// Describe some topics in the cluster.
	DescribeTopics(topics []string) (metadata []*TopicMetadata, err error)

	// Delete a topic. It may take several seconds after DeleteTopic returns success
	// and for all the brokers to become aware that the topics are gone.
	// During this time, listTopics may continue to return information about the deleted topic.
@@ -29,6 +42,14 @@ type ClusterAdmin interface {
	// new partitions. This operation is supported by brokers with version 1.0.0 or higher.
	CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error

	// Alter the replica assignment for partitions.
	// This operation is supported by brokers with version 2.4.0.0 or higher.
	AlterPartitionReassignments(topic string, assignment [][]int32) error

	// Provides info on ongoing partitions replica reassignments.
	// This operation is supported by brokers with version 2.4.0.0 or higher.
	ListPartitionReassignments(topics string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error)

	// Delete records whose offset is smaller than the given offset of the corresponding partition.
	// This operation is supported by brokers with version 0.11.0.0 or higher.
	DeleteRecords(topic string, partitionOffsets map[int32]int64) error
@@ -65,6 +86,24 @@ type ClusterAdmin interface {
	// This operation is supported by brokers with version 0.11.0.0 or higher.
	DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error)

	// List the consumer groups available in the cluster.
	ListConsumerGroups() (map[string]string, error)

	// Describe the given consumer groups.
	DescribeConsumerGroups(groups []string) ([]*GroupDescription, error)

	// List the consumer group offsets available in the cluster.
	ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error)

	// Delete a consumer group.
	DeleteConsumerGroup(group string) error

	// Get information about the nodes in the cluster
	DescribeCluster() (brokers []*Broker, controllerID int32, err error)

	// Get information about all log directories on the given set of brokers
	DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error)

	// Close shuts down the admin and closes underlying client.
	Close() error
}
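A hedged end-to-end sketch of the admin surface described above; the broker address and version pin are assumptions, and error handling is kept minimal:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Version = sarama.V2_4_0_0 // assumption: the cluster speaks Kafka 2.4+

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalln(err)
	}
	defer admin.Close()

	// DescribeCluster returns the known brokers and the current controller ID.
	brokers, controllerID, err := admin.DescribeCluster()
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("cluster has %d broker(s), controller is %d", len(brokers), controllerID)

	// ListTopics gathers topic metadata plus non-default configs in one pass.
	topics, err := admin.ListTopics()
	if err != nil {
		log.Fatalln(err)
	}
	for name, detail := range topics {
		log.Printf("topic %s: %d partition(s)", name, detail.NumPartitions)
	}
}
```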
@@ -80,9 +119,14 @@ func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) {
	if err != nil {
		return nil, err
	}
	return NewClusterAdminFromClient(client)
}

// NewClusterAdminFromClient creates a new ClusterAdmin using the given client.
// Note that underlying client will also be closed on admin's Close() call.
func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) {
	//make sure we can retrieve the controller
	_, err = client.Controller()
	_, err := client.Controller()
	if err != nil {
		return nil, err
	}
@@ -102,14 +146,51 @@ func (ca *clusterAdmin) Controller() (*Broker, error) {
	return ca.client.Controller()
}

func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
func (ca *clusterAdmin) refreshController() (*Broker, error) {
	return ca.client.RefreshController()
}

// isErrNoController returns `true` if the given error type unwraps to an
// `ErrNotController` response from Kafka
func isErrNoController(err error) bool {
	switch e := err.(type) {
	case *TopicError:
		return e.Err == ErrNotController
	case *TopicPartitionError:
		return e.Err == ErrNotController
	case KError:
		return e == ErrNotController
	}
	return false
}

// retryOnError will repeatedly call the given (error-returning) func in the
// case that its response is non-nil and retriable (as determined by the
// provided retriable func) up to the maximum number of tries permitted by
// the admin client configuration
func (ca *clusterAdmin) retryOnError(retriable func(error) bool, fn func() error) error {
	var err error
	for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ {
		err = fn()
		if err == nil || !retriable(err) {
			return err
		}
		Logger.Printf(
			"admin/request retrying after %dms... (%d attempts remaining)\n",
			ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt)
		time.Sleep(ca.conf.Admin.Retry.Backoff)
		continue
	}
	return err
}

func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
	if topic == "" {
		return ErrInvalidTopic
	}

	if detail == nil {
		return errors.New("You must specify topic details")
		return errors.New("you must specify topic details")
	}

	topicDetails := make(map[string]*TopicDetail)
@@ -128,30 +209,180 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO
		request.Version = 2
	}

	b, err := ca.Controller()
	return ca.retryOnError(isErrNoController, func() error {
		b, err := ca.Controller()
		if err != nil {
			return err
		}

		rsp, err := b.CreateTopics(request)
		if err != nil {
			return err
		}

		topicErr, ok := rsp.TopicErrors[topic]
		if !ok {
			return ErrIncompleteResponse
		}

		if topicErr.Err != ErrNoError {
			if topicErr.Err == ErrNotController {
				_, _ = ca.refreshController()
			}
			return topicErr
		}

		return nil
	})
}
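From the caller's side the retry wrapper is invisible; creating a topic remains a single call. A minimal hedged sketch, assuming an existing admin and placeholder topic settings:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

// createExampleTopic shows the caller-side view of CreateTopic; the
// controller lookup and the ErrNotController retry above happen inside.
func createExampleTopic(admin sarama.ClusterAdmin) {
	detail := &sarama.TopicDetail{
		NumPartitions:     3, // example sizing only
		ReplicationFactor: 1,
	}
	if err := admin.CreateTopic("example-topic", detail, false); err != nil {
		log.Fatalln(err)
	}
}
```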
||||
func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) {
|
||||
controller, err := ca.Controller()
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rsp, err := b.CreateTopics(request)
|
||||
request := &MetadataRequest{
|
||||
Topics: topics,
|
||||
AllowAutoTopicCreation: false,
|
||||
}
|
||||
|
||||
if ca.conf.Version.IsAtLeast(V1_0_0_0) {
|
||||
request.Version = 5
|
||||
} else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
|
||||
request.Version = 4
|
||||
}
|
||||
|
||||
response, err := controller.GetMetadata(request)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
return response.Topics, nil
|
||||
}
|
||||
|
||||
func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) {
|
||||
controller, err := ca.Controller()
|
||||
if err != nil {
|
||||
return nil, int32(0), err
|
||||
}
|
||||
|
||||
topicErr, ok := rsp.TopicErrors[topic]
|
||||
if !ok {
|
||||
return ErrIncompleteResponse
|
||||
request := &MetadataRequest{
|
||||
Topics: []string{},
|
||||
}
|
||||
|
||||
if topicErr.Err != ErrNoError {
|
||||
return topicErr.Err
|
||||
if ca.conf.Version.IsAtLeast(V0_10_0_0) {
|
||||
request.Version = 1
|
||||
}
|
||||
|
||||
return nil
|
||||
response, err := controller.GetMetadata(request)
|
||||
if err != nil {
|
||||
return nil, int32(0), err
|
||||
}
|
||||
|
||||
return response.Brokers, response.ControllerID, nil
|
||||
}
|
||||
|
||||
func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) {
|
||||
brokers := ca.client.Brokers()
|
||||
for _, b := range brokers {
|
||||
if b.ID() == id {
|
||||
return b, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("could not find broker id %d", id)
|
||||
}
|
||||
|
||||
func (ca *clusterAdmin) findAnyBroker() (*Broker, error) {
|
||||
brokers := ca.client.Brokers()
|
||||
if len(brokers) > 0 {
|
||||
index := rand.Intn(len(brokers))
|
||||
return brokers[index], nil
|
||||
}
|
||||
return nil, errors.New("no available broker")
|
||||
}
|
||||
|
||||
func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) {
|
||||
// In order to build TopicDetails we need to first get the list of all
|
||||
// topics using a MetadataRequest and then get their configs using a
|
||||
// DescribeConfigsRequest request. To avoid sending many requests to the
|
||||
// broker, we use a single DescribeConfigsRequest.
|
||||
|
||||
// Send the all-topic MetadataRequest
|
||||
b, err := ca.findAnyBroker()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_ = b.Open(ca.client.Config())
|
||||
|
||||
metadataReq := &MetadataRequest{}
|
||||
metadataResp, err := b.GetMetadata(metadataReq)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
topicsDetailsMap := make(map[string]TopicDetail)
|
||||
|
||||
var describeConfigsResources []*ConfigResource
|
||||
|
||||
for _, topic := range metadataResp.Topics {
|
||||
topicDetails := TopicDetail{
|
||||
NumPartitions: int32(len(topic.Partitions)),
|
||||
}
|
||||
if len(topic.Partitions) > 0 {
|
||||
topicDetails.ReplicaAssignment = map[int32][]int32{}
|
||||
for _, partition := range topic.Partitions {
|
||||
topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas
|
||||
}
|
||||
topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas))
|
||||
}
|
||||
topicsDetailsMap[topic.Name] = topicDetails
|
||||
|
||||
// we populate the resources we want to describe from the MetadataResponse
|
||||
topicResource := ConfigResource{
|
||||
Type: TopicResource,
|
||||
Name: topic.Name,
|
||||
}
|
||||
describeConfigsResources = append(describeConfigsResources, &topicResource)
|
||||
}
|
||||
|
||||
// Send the DescribeConfigsRequest
|
||||
describeConfigsReq := &DescribeConfigsRequest{
|
||||
Resources: describeConfigsResources,
|
||||
}
|
||||
|
||||
if ca.conf.Version.IsAtLeast(V1_1_0_0) {
|
||||
describeConfigsReq.Version = 1
|
||||
}
|
||||
|
||||
if ca.conf.Version.IsAtLeast(V2_0_0_0) {
|
||||
describeConfigsReq.Version = 2
|
||||
}
|
||||
|
||||
describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, resource := range describeConfigsResp.Resources {
|
||||
topicDetails := topicsDetailsMap[resource.Name]
|
||||
topicDetails.ConfigEntries = make(map[string]*string)
|
||||
|
||||
for _, entry := range resource.Configs {
|
||||
// only include non-default non-sensitive config
|
||||
// (don't actually think topic config will ever be sensitive)
|
||||
if entry.Default || entry.Sensitive {
|
||||
continue
|
||||
}
|
||||
topicDetails.ConfigEntries[entry.Name] = &entry.Value
|
||||
}
|
||||
|
||||
topicsDetailsMap[resource.Name] = topicDetails
|
||||
}
|
||||
|
||||
return topicsDetailsMap, nil
|
||||
}
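ListTopics batches the work into one all-topic MetadataRequest plus one DescribeConfigsRequest, so a caller gets partition counts, replica assignments and non-default configs in two round-trips. A minimal usage sketch, assuming Sarama's public NewClusterAdmin constructor and a broker at localhost:9092 (neither is shown in this hunk):

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_0_0_0 // assumption: match the broker version you actually run

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	topics, err := admin.ListTopics()
	if err != nil {
		log.Fatal(err)
	}
	for name, detail := range topics {
		fmt.Printf("%s: %d partitions, rf=%d, %d non-default configs\n",
			name, detail.NumPartitions, detail.ReplicationFactor, len(detail.ConfigEntries))
	}
}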
|
||||
|
||||
func (ca *clusterAdmin) DeleteTopic(topic string) error {
|
||||
|
||||
if topic == "" {
|
||||
return ErrInvalidTopic
|
||||
}
|
||||
|
@ -165,25 +396,31 @@ func (ca *clusterAdmin) DeleteTopic(topic string) error {
|
|||
request.Version = 1
|
||||
}
|
||||
|
||||
b, err := ca.Controller()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ca.retryOnError(isErrNoController, func() error {
|
||||
b, err := ca.Controller()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rsp, err := b.DeleteTopics(request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rsp, err := b.DeleteTopics(request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
topicErr, ok := rsp.TopicErrorCodes[topic]
|
||||
if !ok {
|
||||
return ErrIncompleteResponse
|
||||
}
|
||||
topicErr, ok := rsp.TopicErrorCodes[topic]
|
||||
if !ok {
|
||||
return ErrIncompleteResponse
|
||||
}
|
||||
|
||||
if topicErr != ErrNoError {
|
||||
return topicErr
|
||||
}
|
||||
return nil
|
||||
if topicErr != ErrNoError {
|
||||
if topicErr == ErrNotController {
|
||||
_, _ = ca.refreshController()
|
||||
}
|
||||
return topicErr
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
|
||||
|
@ -199,63 +436,170 @@ func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [
|
|||
Timeout: ca.conf.Admin.Timeout,
|
||||
}
|
||||
|
||||
b, err := ca.Controller()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ca.retryOnError(isErrNoController, func() error {
|
||||
b, err := ca.Controller()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rsp, err := b.CreatePartitions(request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rsp, err := b.CreatePartitions(request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
topicErr, ok := rsp.TopicPartitionErrors[topic]
|
||||
if !ok {
|
||||
return ErrIncompleteResponse
|
||||
}
|
||||
topicErr, ok := rsp.TopicPartitionErrors[topic]
|
||||
if !ok {
|
||||
return ErrIncompleteResponse
|
||||
}
|
||||
|
||||
if topicErr.Err != ErrNoError {
|
||||
return topicErr.Err
|
||||
}
|
||||
if topicErr.Err != ErrNoError {
|
||||
if topicErr.Err == ErrNotController {
|
||||
_, _ = ca.refreshController()
|
||||
}
|
||||
return topicErr
|
||||
}
|
||||
|
||||
return nil
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
|
||||
|
||||
func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error {
|
||||
if topic == "" {
|
||||
return ErrInvalidTopic
|
||||
}
|
||||
|
||||
topics := make(map[string]*DeleteRecordsRequestTopic)
|
||||
topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: partitionOffsets}
|
||||
request := &DeleteRecordsRequest{
|
||||
Topics: topics,
|
||||
Timeout: ca.conf.Admin.Timeout,
|
||||
request := &AlterPartitionReassignmentsRequest{
|
||||
TimeoutMs: int32(60000),
|
||||
Version: int16(0),
|
||||
}
|
||||
|
||||
for i := 0; i < len(assignment); i++ {
|
||||
request.AddBlock(topic, int32(i), assignment[i])
|
||||
}
|
||||
|
||||
return ca.retryOnError(isErrNoController, func() error {
|
||||
b, err := ca.Controller()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
errs := make([]error, 0)
|
||||
|
||||
rsp, err := b.AlterPartitionReassignments(request)
|
||||
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
if rsp.ErrorCode > 0 {
|
||||
errs = append(errs, errors.New(rsp.ErrorCode.Error()))
|
||||
}
|
||||
|
||||
for topic, topicErrors := range rsp.Errors {
|
||||
for partition, partitionError := range topicErrors {
|
||||
if partitionError.errorCode != ErrNoError {
|
||||
errStr := fmt.Sprintf("[%s-%d]: %s", topic, partition, partitionError.errorCode.Error())
|
||||
errs = append(errs, errors.New(errStr))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
return ErrReassignPartitions{MultiError{&errs}}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
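AlterPartitionReassignments treats index i of the assignment slice as partition i, adding one block per partition before sending the request to the controller. A small sketch of calling it, assuming the same imports and ClusterAdmin construction as the ListTopics sketch above; the topic name and broker ids are illustrative only:

package sketch

import "github.com/Shopify/sarama"

// reassignEvents moves the first two partitions of "events" onto new replica sets.
func reassignEvents(admin sarama.ClusterAdmin) error {
	assignment := [][]int32{
		{1, 2, 3}, // partition 0 -> brokers 1, 2, 3
		{2, 3, 4}, // partition 1 -> brokers 2, 3, 4
	}
	return admin.AlterPartitionReassignments("events", assignment)
}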
|
||||
|
||||
func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) {
|
||||
if topic == "" {
|
||||
return nil, ErrInvalidTopic
|
||||
}
|
||||
|
||||
request := &ListPartitionReassignmentsRequest{
|
||||
TimeoutMs: int32(60000),
|
||||
Version: int16(0),
|
||||
}
|
||||
|
||||
request.AddBlock(topic, partitions)
|
||||
|
||||
b, err := ca.Controller()
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
_ = b.Open(ca.client.Config())
|
||||
|
||||
rsp, err := b.DeleteRecords(request)
|
||||
if err != nil {
|
||||
return err
|
||||
rsp, err := b.ListPartitionReassignments(request)
|
||||
|
||||
if err == nil && rsp != nil {
|
||||
return rsp.TopicStatus, nil
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
_, ok := rsp.Topics[topic]
|
||||
if !ok {
|
||||
return ErrIncompleteResponse
|
||||
func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
|
||||
if topic == "" {
|
||||
return ErrInvalidTopic
|
||||
}
|
||||
partitionPerBroker := make(map[*Broker][]int32)
|
||||
for partition := range partitionOffsets {
|
||||
broker, err := ca.client.Leader(topic, partition)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, ok := partitionPerBroker[broker]; ok {
|
||||
partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
|
||||
} else {
|
||||
partitionPerBroker[broker] = []int32{partition}
|
||||
}
|
||||
}
|
||||
errs := make([]error, 0)
|
||||
for broker, partitions := range partitionPerBroker {
|
||||
topics := make(map[string]*DeleteRecordsRequestTopic)
|
||||
recordsToDelete := make(map[int32]int64)
|
||||
for _, p := range partitions {
|
||||
recordsToDelete[p] = partitionOffsets[p]
|
||||
}
|
||||
topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: recordsToDelete}
|
||||
request := &DeleteRecordsRequest{
|
||||
Topics: topics,
|
||||
Timeout: ca.conf.Admin.Timeout,
|
||||
}
|
||||
|
||||
rsp, err := broker.DeleteRecords(request)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
deleteRecordsResponseTopic, ok := rsp.Topics[topic]
|
||||
if !ok {
|
||||
errs = append(errs, ErrIncompleteResponse)
|
||||
} else {
|
||||
for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions {
|
||||
if deleteRecordsResponsePartition.Err != ErrNoError {
|
||||
errs = append(errs, errors.New(deleteRecordsResponsePartition.Err.Error()))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
return ErrDeleteRecords{MultiError{&errs}}
|
||||
}
|
||||
// TODO: since we are dealing with multiple partitions it would be good if we returned a slice of errors,
// one per partition, instead of a single error
|
||||
return nil
|
||||
}
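DeleteRecords groups the requested partitions by their leader broker, sends one DeleteRecordsRequest per leader, and collects any failures into ErrDeleteRecords. A sketch of calling it, assuming the same ClusterAdmin construction as the ListTopics sketch above; the topic name and offsets are illustrative only:

package sketch

import "github.com/Shopify/sarama"

// truncateEvents marks records below the given offsets as eligible for deletion.
func truncateEvents(admin sarama.ClusterAdmin) error {
	offsets := map[int32]int64{
		0: 1000, // partition 0: everything below offset 1000
		1: 2500, // partition 1: everything below offset 2500
	}
	return admin.DeleteRecords("events", offsets)
}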
|
||||
|
||||
func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
|
||||
// Returns a bool indicating whether the resource request needs to go to a
|
||||
// specific broker
|
||||
func dependsOnSpecificNode(resource ConfigResource) bool {
|
||||
return (resource.Type == BrokerResource && resource.Name != "") ||
|
||||
resource.Type == BrokerLoggerResource
|
||||
}
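dependsOnSpecificNode is what routes broker-scoped (and broker-logger) config requests to the broker named in the resource, with Name parsed as the broker id, while topic-scoped requests can go to any broker. A sketch of the two cases as seen through DescribeConfig, assuming the same ClusterAdmin construction as the ListTopics sketch above and an illustrative topic name and broker id:

package sketch

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// describeConfigs shows the two routing cases: any broker vs. a specific broker.
func describeConfigs(admin sarama.ClusterAdmin) error {
	// A topic-scoped describe can be answered by any broker.
	topicEntries, err := admin.DescribeConfig(sarama.ConfigResource{
		Type: sarama.TopicResource,
		Name: "events",
	})
	if err != nil {
		return err
	}
	// A broker-scoped describe is sent to the broker whose id is in Name.
	brokerEntries, err := admin.DescribeConfig(sarama.ConfigResource{
		Type: sarama.BrokerResource,
		Name: "1", // parsed with strconv.Atoi into broker id 1
	})
	if err != nil {
		return err
	}
	fmt.Printf("topic: %d entries, broker: %d entries\n", len(topicEntries), len(brokerEntries))
	return nil
}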
|
||||
|
||||
func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
|
||||
var entries []ConfigEntry
|
||||
var resources []*ConfigResource
|
||||
resources = append(resources, &resource)
|
||||
|
@ -264,11 +608,31 @@ func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry,
|
|||
Resources: resources,
|
||||
}
|
||||
|
||||
b, err := ca.Controller()
|
||||
if ca.conf.Version.IsAtLeast(V1_1_0_0) {
|
||||
request.Version = 1
|
||||
}
|
||||
|
||||
if ca.conf.Version.IsAtLeast(V2_0_0_0) {
|
||||
request.Version = 2
|
||||
}
|
||||
|
||||
var (
|
||||
b *Broker
|
||||
err error
|
||||
)
|
||||
|
||||
// DescribeConfig of broker/broker logger must be sent to the broker in question
|
||||
if dependsOnSpecificNode(resource) {
|
||||
id, _ := strconv.Atoi(resource.Name)
|
||||
b, err = ca.findBroker(int32(id))
|
||||
} else {
|
||||
b, err = ca.findAnyBroker()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_ = b.Open(ca.client.Config())
|
||||
rsp, err := b.DescribeConfigs(request)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -279,6 +643,9 @@ func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry,
|
|||
if rspResource.ErrorMsg != "" {
|
||||
return nil, errors.New(rspResource.ErrorMsg)
|
||||
}
|
||||
if rspResource.ErrorCode != 0 {
|
||||
return nil, KError(rspResource.ErrorCode)
|
||||
}
|
||||
for _, cfgEntry := range rspResource.Configs {
|
||||
entries = append(entries, *cfgEntry)
|
||||
}
|
||||
|
@ -288,7 +655,6 @@ func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry,
|
|||
}
|
||||
|
||||
func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
|
||||
|
||||
var resources []*AlterConfigsResource
|
||||
resources = append(resources, &AlterConfigsResource{
|
||||
Type: resourceType,
|
||||
|
@ -301,11 +667,23 @@ func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string
|
|||
ValidateOnly: validateOnly,
|
||||
}
|
||||
|
||||
b, err := ca.Controller()
|
||||
var (
|
||||
b *Broker
|
||||
err error
|
||||
)
|
||||
|
||||
// AlterConfig of broker/broker logger must be sent to the broker in question
|
||||
if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) {
|
||||
id, _ := strconv.Atoi(name)
|
||||
b, err = ca.findBroker(int32(id))
|
||||
} else {
|
||||
b, err = ca.findAnyBroker()
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_ = b.Open(ca.client.Config())
|
||||
rsp, err := b.AlterConfigs(request)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -316,6 +694,9 @@ func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string
|
|||
if rspResource.ErrorMsg != "" {
|
||||
return errors.New(rspResource.ErrorMsg)
|
||||
}
|
||||
if rspResource.ErrorCode != 0 {
|
||||
return KError(rspResource.ErrorCode)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@ -326,6 +707,10 @@ func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
|
|||
acls = append(acls, &AclCreation{resource, acl})
|
||||
request := &CreateAclsRequest{AclCreations: acls}
|
||||
|
||||
if ca.conf.Version.IsAtLeast(V2_0_0_0) {
|
||||
request.Version = 1
|
||||
}
|
||||
|
||||
b, err := ca.Controller()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -336,9 +721,12 @@ func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
|
|||
}
|
||||
|
||||
func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
|
||||
|
||||
request := &DescribeAclsRequest{AclFilter: filter}
|
||||
|
||||
if ca.conf.Version.IsAtLeast(V2_0_0_0) {
|
||||
request.Version = 1
|
||||
}
|
||||
|
||||
b, err := ca.Controller()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -361,6 +749,10 @@ func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]Matchi
|
|||
filters = append(filters, &filter)
|
||||
request := &DeleteAclsRequest{Filters: filters}
|
||||
|
||||
if ca.conf.Version.IsAtLeast(V2_0_0_0) {
|
||||
request.Version = 1
|
||||
}
|
||||
|
||||
b, err := ca.Controller()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -376,7 +768,167 @@ func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]Matchi
|
|||
for _, mACL := range fr.MatchingAcls {
|
||||
mAcls = append(mAcls, *mACL)
|
||||
}
|
||||
|
||||
}
|
||||
return mAcls, nil
|
||||
}
|
||||
|
||||
func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) {
|
||||
groupsPerBroker := make(map[*Broker][]string)
|
||||
|
||||
for _, group := range groups {
|
||||
controller, err := ca.client.Coordinator(group)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
groupsPerBroker[controller] = append(groupsPerBroker[controller], group)
|
||||
}
|
||||
|
||||
for broker, brokerGroups := range groupsPerBroker {
|
||||
response, err := broker.DescribeGroups(&DescribeGroupsRequest{
|
||||
Groups: brokerGroups,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result = append(result, response.Groups...)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) {
|
||||
allGroups = make(map[string]string)
|
||||
|
||||
// Query brokers in parallel, since we have to query *all* brokers
|
||||
brokers := ca.client.Brokers()
|
||||
groupMaps := make(chan map[string]string, len(brokers))
|
||||
errChan := make(chan error, len(brokers))
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
for _, b := range brokers {
|
||||
wg.Add(1)
|
||||
go func(b *Broker, conf *Config) {
|
||||
defer wg.Done()
|
||||
_ = b.Open(conf) // Ensure that broker is opened
|
||||
|
||||
response, err := b.ListGroups(&ListGroupsRequest{})
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
groups := make(map[string]string)
|
||||
for group, typ := range response.Groups {
|
||||
groups[group] = typ
|
||||
}
|
||||
|
||||
groupMaps <- groups
|
||||
}(b, ca.conf)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(groupMaps)
|
||||
close(errChan)
|
||||
|
||||
for groupMap := range groupMaps {
|
||||
for group, protocolType := range groupMap {
|
||||
allGroups[group] = protocolType
|
||||
}
|
||||
}
|
||||
|
||||
// Intentionally return only the first error for simplicity
|
||||
err = <-errChan
|
||||
return
|
||||
}
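ListConsumerGroups fans a ListGroupsRequest out to every known broker in parallel, merges the per-broker maps, and, as the comment notes, reports only the first error. A usage sketch, assuming the same ClusterAdmin construction as the ListTopics sketch above:

package sketch

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// printGroups lists every consumer group known to the cluster and its protocol type.
func printGroups(admin sarama.ClusterAdmin) error {
	groups, err := admin.ListConsumerGroups() // group name -> protocol type, e.g. "consumer"
	if err != nil {
		return err
	}
	for group, protocolType := range groups {
		fmt.Printf("%s (%s)\n", group, protocolType)
	}
	return nil
}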
|
||||
|
||||
func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) {
|
||||
coordinator, err := ca.client.Coordinator(group)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
request := &OffsetFetchRequest{
|
||||
ConsumerGroup: group,
|
||||
partitions: topicPartitions,
|
||||
}
|
||||
|
||||
if ca.conf.Version.IsAtLeast(V0_10_2_0) {
|
||||
request.Version = 2
|
||||
} else if ca.conf.Version.IsAtLeast(V0_8_2_2) {
|
||||
request.Version = 1
|
||||
}
|
||||
|
||||
return coordinator.FetchOffset(request)
|
||||
}
|
||||
|
||||
func (ca *clusterAdmin) DeleteConsumerGroup(group string) error {
|
||||
coordinator, err := ca.client.Coordinator(group)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
request := &DeleteGroupsRequest{
|
||||
Groups: []string{group},
|
||||
}
|
||||
|
||||
resp, err := coordinator.DeleteGroups(request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
groupErr, ok := resp.GroupErrorCodes[group]
|
||||
if !ok {
|
||||
return ErrIncompleteResponse
|
||||
}
|
||||
|
||||
if groupErr != ErrNoError {
|
||||
return groupErr
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) {
|
||||
allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata)
|
||||
|
||||
// Query brokers in parallel, since we may have to query multiple brokers
|
||||
logDirsMaps := make(chan map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds))
|
||||
errChan := make(chan error, len(brokerIds))
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
for _, b := range brokerIds {
|
||||
		broker, err := ca.findBroker(b)
		if err != nil {
			Logger.Printf("Unable to find broker with ID = %v\n", b)
			continue
		}
		// Count the worker only once we know its goroutine will actually run,
		// so a missing broker cannot leave wg.Wait() blocked forever.
		wg.Add(1)
|
||||
go func(b *Broker, conf *Config) {
|
||||
defer wg.Done()
|
||||
_ = b.Open(conf) // Ensure that broker is opened
|
||||
|
||||
response, err := b.DescribeLogDirs(&DescribeLogDirsRequest{})
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
logDirs := make(map[int32][]DescribeLogDirsResponseDirMetadata)
|
||||
logDirs[b.ID()] = response.LogDirs
|
||||
logDirsMaps <- logDirs
|
||||
}(broker, ca.conf)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(logDirsMaps)
|
||||
close(errChan)
|
||||
|
||||
for logDirsMap := range logDirsMaps {
|
||||
for id, logDirs := range logDirsMap {
|
||||
allLogDirs[id] = logDirs
|
||||
}
|
||||
}
|
||||
|
||||
// Intentionally return only the first error for simplicity
|
||||
err = <-errChan
|
||||
return
|
||||
}
|
||||
|
|
|
@ -1,45 +1,47 @@
|
|||
package sarama
|
||||
|
||||
//AlterConfigsRequest is an alter config request type
|
||||
type AlterConfigsRequest struct {
|
||||
Resources []*AlterConfigsResource
|
||||
ValidateOnly bool
|
||||
}
|
||||
|
||||
//AlterConfigsResource is an alter config resource type
|
||||
type AlterConfigsResource struct {
|
||||
Type ConfigResourceType
|
||||
Name string
|
||||
ConfigEntries map[string]*string
|
||||
}
|
||||
|
||||
func (acr *AlterConfigsRequest) encode(pe packetEncoder) error {
|
||||
if err := pe.putArrayLength(len(acr.Resources)); err != nil {
|
||||
func (a *AlterConfigsRequest) encode(pe packetEncoder) error {
|
||||
if err := pe.putArrayLength(len(a.Resources)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, r := range acr.Resources {
|
||||
for _, r := range a.Resources {
|
||||
if err := r.encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
pe.putBool(acr.ValidateOnly)
|
||||
pe.putBool(a.ValidateOnly)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (acr *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
|
||||
func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
|
||||
resourceCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
acr.Resources = make([]*AlterConfigsResource, resourceCount)
|
||||
for i := range acr.Resources {
|
||||
a.Resources = make([]*AlterConfigsResource, resourceCount)
|
||||
for i := range a.Resources {
|
||||
r := &AlterConfigsResource{}
|
||||
err = r.decode(pd, version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
acr.Resources[i] = r
|
||||
a.Resources[i] = r
|
||||
}
|
||||
|
||||
validateOnly, err := pd.getBool()
|
||||
|
@ -47,22 +49,22 @@ func (acr *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
|
|||
return err
|
||||
}
|
||||
|
||||
acr.ValidateOnly = validateOnly
|
||||
a.ValidateOnly = validateOnly
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ac *AlterConfigsResource) encode(pe packetEncoder) error {
|
||||
pe.putInt8(int8(ac.Type))
|
||||
func (a *AlterConfigsResource) encode(pe packetEncoder) error {
|
||||
pe.putInt8(int8(a.Type))
|
||||
|
||||
if err := pe.putString(ac.Name); err != nil {
|
||||
if err := pe.putString(a.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := pe.putArrayLength(len(ac.ConfigEntries)); err != nil {
|
||||
if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
|
||||
return err
|
||||
}
|
||||
for configKey, configValue := range ac.ConfigEntries {
|
||||
for configKey, configValue := range a.ConfigEntries {
|
||||
if err := pe.putString(configKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -74,18 +76,18 @@ func (ac *AlterConfigsResource) encode(pe packetEncoder) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
|
||||
func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
|
||||
t, err := pd.getInt8()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ac.Type = ConfigResourceType(t)
|
||||
a.Type = ConfigResourceType(t)
|
||||
|
||||
name, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ac.Name = name
|
||||
a.Name = name
|
||||
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
|
@ -93,13 +95,13 @@ func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
|
|||
}
|
||||
|
||||
if n > 0 {
|
||||
ac.ConfigEntries = make(map[string]*string, n)
|
||||
a.ConfigEntries = make(map[string]*string, n)
|
||||
for i := 0; i < n; i++ {
|
||||
configKey, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ac.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
|
||||
if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -107,14 +109,18 @@ func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func (acr *AlterConfigsRequest) key() int16 {
|
||||
func (a *AlterConfigsRequest) key() int16 {
|
||||
return 33
|
||||
}
|
||||
|
||||
func (acr *AlterConfigsRequest) version() int16 {
|
||||
func (a *AlterConfigsRequest) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (acr *AlterConfigsRequest) requiredVersion() KafkaVersion {
|
||||
func (a *AlterConfigsRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
}
|
||||
|
|
|
@ -2,11 +2,13 @@ package sarama
|
|||
|
||||
import "time"
|
||||
|
||||
//AlterConfigsResponse is a response type for alter config
|
||||
type AlterConfigsResponse struct {
|
||||
ThrottleTime time.Duration
|
||||
Resources []*AlterConfigsResourceResponse
|
||||
}
|
||||
|
||||
//AlterConfigsResourceResponse is a response type for alter config resource
|
||||
type AlterConfigsResourceResponse struct {
|
||||
ErrorCode int16
|
||||
ErrorMsg string
|
||||
|
@ -14,21 +16,21 @@ type AlterConfigsResourceResponse struct {
|
|||
Name string
|
||||
}
|
||||
|
||||
func (ct *AlterConfigsResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt32(int32(ct.ThrottleTime / time.Millisecond))
|
||||
func (a *AlterConfigsResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
|
||||
|
||||
if err := pe.putArrayLength(len(ct.Resources)); err != nil {
|
||||
if err := pe.putArrayLength(len(a.Resources)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := range ct.Resources {
|
||||
pe.putInt16(ct.Resources[i].ErrorCode)
|
||||
err := pe.putString(ct.Resources[i].ErrorMsg)
|
||||
for i := range a.Resources {
|
||||
pe.putInt16(a.Resources[i].ErrorCode)
|
||||
err := pe.putString(a.Resources[i].ErrorMsg)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
pe.putInt8(int8(ct.Resources[i].Type))
|
||||
err = pe.putString(ct.Resources[i].Name)
|
||||
pe.putInt8(int8(a.Resources[i].Type))
|
||||
err = pe.putString(a.Resources[i].Name)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -37,59 +39,63 @@ func (ct *AlterConfigsResponse) encode(pe packetEncoder) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (acr *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
|
||||
func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
|
||||
throttleTime, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
acr.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
|
||||
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
|
||||
|
||||
responseCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
acr.Resources = make([]*AlterConfigsResourceResponse, responseCount)
|
||||
a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
|
||||
|
||||
for i := range acr.Resources {
|
||||
acr.Resources[i] = new(AlterConfigsResourceResponse)
|
||||
for i := range a.Resources {
|
||||
a.Resources[i] = new(AlterConfigsResourceResponse)
|
||||
|
||||
errCode, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
acr.Resources[i].ErrorCode = errCode
|
||||
a.Resources[i].ErrorCode = errCode
|
||||
|
||||
e, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
acr.Resources[i].ErrorMsg = e
|
||||
a.Resources[i].ErrorMsg = e
|
||||
|
||||
t, err := pd.getInt8()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
acr.Resources[i].Type = ConfigResourceType(t)
|
||||
a.Resources[i].Type = ConfigResourceType(t)
|
||||
|
||||
name, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
acr.Resources[i].Name = name
|
||||
a.Resources[i].Name = name
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *AlterConfigsResponse) key() int16 {
|
||||
func (a *AlterConfigsResponse) key() int16 {
|
||||
return 32
|
||||
}
|
||||
|
||||
func (r *AlterConfigsResponse) version() int16 {
|
||||
func (a *AlterConfigsResponse) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *AlterConfigsResponse) requiredVersion() KafkaVersion {
|
||||
func (a *AlterConfigsResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (a *AlterConfigsResponse) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
}
|
||||
|
|
vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go (new vendored file, 130 lines)
|
@ -0,0 +1,130 @@
|
|||
package sarama
|
||||
|
||||
type alterPartitionReassignmentsBlock struct {
|
||||
replicas []int32
|
||||
}
|
||||
|
||||
func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error {
|
||||
if err := pe.putNullableCompactInt32Array(b.replicas); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pe.putEmptyTaggedFieldArray()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) {
|
||||
if b.replicas, err = pd.getCompactInt32Array(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type AlterPartitionReassignmentsRequest struct {
|
||||
TimeoutMs int32
|
||||
blocks map[string]map[int32]*alterPartitionReassignmentsBlock
|
||||
Version int16
|
||||
}
|
||||
|
||||
func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error {
|
||||
pe.putInt32(r.TimeoutMs)
|
||||
|
||||
pe.putCompactArrayLength(len(r.blocks))
|
||||
|
||||
for topic, partitions := range r.blocks {
|
||||
if err := pe.putCompactString(topic); err != nil {
|
||||
return err
|
||||
}
|
||||
pe.putCompactArrayLength(len(partitions))
|
||||
for partition, block := range partitions {
|
||||
pe.putInt32(partition)
|
||||
if err := block.encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
pe.putEmptyTaggedFieldArray()
|
||||
}
|
||||
|
||||
pe.putEmptyTaggedFieldArray()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Version = version
|
||||
|
||||
if r.TimeoutMs, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
topicCount, err := pd.getCompactArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if topicCount > 0 {
|
||||
r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
|
||||
for i := 0; i < topicCount; i++ {
|
||||
topic, err := pd.getCompactString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
partitionCount, err := pd.getCompactArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
|
||||
for j := 0; j < partitionCount; j++ {
|
||||
partition, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
block := &alterPartitionReassignmentsBlock{}
|
||||
if err := block.decode(pd); err != nil {
|
||||
return err
|
||||
}
|
||||
r.blocks[topic][partition] = block
|
||||
|
||||
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (r *AlterPartitionReassignmentsRequest) key() int16 {
|
||||
return 45
|
||||
}
|
||||
|
||||
func (r *AlterPartitionReassignmentsRequest) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 {
|
||||
return 2
|
||||
}
|
||||
|
||||
func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
|
||||
return V2_4_0_0
|
||||
}
|
||||
|
||||
func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) {
|
||||
if r.blocks == nil {
|
||||
r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
|
||||
}
|
||||
|
||||
if r.blocks[topic] == nil {
|
||||
r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
|
||||
}
|
||||
|
||||
r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas}
|
||||
}
|
vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go (new vendored file, 157 lines)
|
@ -0,0 +1,157 @@
|
|||
package sarama
|
||||
|
||||
type alterPartitionReassignmentsErrorBlock struct {
|
||||
errorCode KError
|
||||
errorMessage *string
|
||||
}
|
||||
|
||||
func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error {
|
||||
pe.putInt16(int16(b.errorCode))
|
||||
if err := pe.putNullableCompactString(b.errorMessage); err != nil {
|
||||
return err
|
||||
}
|
||||
pe.putEmptyTaggedFieldArray()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) {
|
||||
errorCode, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.errorCode = KError(errorCode)
|
||||
b.errorMessage, err = pd.getCompactNullableString()
|
||||
|
||||
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type AlterPartitionReassignmentsResponse struct {
|
||||
Version int16
|
||||
ThrottleTimeMs int32
|
||||
ErrorCode KError
|
||||
ErrorMessage *string
|
||||
Errors map[string]map[int32]*alterPartitionReassignmentsErrorBlock
|
||||
}
|
||||
|
||||
func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) {
|
||||
if r.Errors == nil {
|
||||
r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock)
|
||||
}
|
||||
partitions := r.Errors[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock)
|
||||
r.Errors[topic] = partitions
|
||||
}
|
||||
|
||||
partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message}
|
||||
}
|
||||
|
||||
func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt32(r.ThrottleTimeMs)
|
||||
pe.putInt16(int16(r.ErrorCode))
|
||||
if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pe.putCompactArrayLength(len(r.Errors))
|
||||
for topic, partitions := range r.Errors {
|
||||
if err := pe.putCompactString(topic); err != nil {
|
||||
return err
|
||||
}
|
||||
pe.putCompactArrayLength(len(partitions))
|
||||
for partition, block := range partitions {
|
||||
pe.putInt32(partition)
|
||||
|
||||
if err := block.encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
pe.putEmptyTaggedFieldArray()
|
||||
}
|
||||
|
||||
pe.putEmptyTaggedFieldArray()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Version = version
|
||||
|
||||
if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
kerr, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.ErrorCode = KError(kerr)
|
||||
|
||||
if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numTopics, err := pd.getCompactArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if numTopics > 0 {
|
||||
r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics)
|
||||
for i := 0; i < numTopics; i++ {
|
||||
topic, err := pd.getCompactString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ongoingPartitionReassignments, err := pd.getCompactArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments)
|
||||
|
||||
for j := 0; j < ongoingPartitionReassignments; j++ {
|
||||
partition, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
block := &alterPartitionReassignmentsErrorBlock{}
|
||||
if err := block.decode(pd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Errors[topic][partition] = block
|
||||
}
|
||||
if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *AlterPartitionReassignmentsResponse) key() int16 {
|
||||
return 45
|
||||
}
|
||||
|
||||
func (r *AlterPartitionReassignmentsResponse) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
|
||||
return V2_4_0_0
|
||||
}
|
|
@ -1,24 +1,29 @@
|
|||
package sarama
|
||||
|
||||
//ApiVersionsRequest ...
|
||||
type ApiVersionsRequest struct {
|
||||
}
|
||||
|
||||
func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
|
||||
func (a *ApiVersionsRequest) encode(pe packetEncoder) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ApiVersionsRequest) key() int16 {
|
||||
func (a *ApiVersionsRequest) key() int16 {
|
||||
return 18
|
||||
}
|
||||
|
||||
func (r *ApiVersionsRequest) version() int16 {
|
||||
func (a *ApiVersionsRequest) version() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
|
||||
func (a *ApiVersionsRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (a *ApiVersionsRequest) requiredVersion() KafkaVersion {
|
||||
return V0_10_0_0
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package sarama
|
||||
|
||||
//ApiVersionsResponseBlock is an api version response block type
|
||||
type ApiVersionsResponseBlock struct {
|
||||
ApiKey int16
|
||||
MinVersion int16
|
||||
|
@ -31,6 +32,7 @@ func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
//ApiVersionsResponse is an api version response type
|
||||
type ApiVersionsResponse struct {
|
||||
Err KError
|
||||
ApiVersions []*ApiVersionsResponseBlock
|
||||
|
@ -82,6 +84,10 @@ func (r *ApiVersionsResponse) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (a *ApiVersionsResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
|
||||
return V0_10_0_0
|
||||
}
|
||||
|
|
|
@ -47,18 +47,78 @@ type AsyncProducer interface {
|
|||
Errors() <-chan *ProducerError
|
||||
}
|
||||
|
||||
// transactionManager keeps the state necessary to ensure idempotent production
|
||||
type transactionManager struct {
|
||||
producerID int64
|
||||
producerEpoch int16
|
||||
sequenceNumbers map[string]int32
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
const (
|
||||
noProducerID = -1
|
||||
noProducerEpoch = -1
|
||||
)
|
||||
|
||||
func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) {
|
||||
key := fmt.Sprintf("%s-%d", topic, partition)
|
||||
t.mutex.Lock()
|
||||
defer t.mutex.Unlock()
|
||||
sequence := t.sequenceNumbers[key]
|
||||
t.sequenceNumbers[key] = sequence + 1
|
||||
return sequence, t.producerEpoch
|
||||
}
|
||||
|
||||
func (t *transactionManager) bumpEpoch() {
|
||||
t.mutex.Lock()
|
||||
defer t.mutex.Unlock()
|
||||
t.producerEpoch++
|
||||
for k := range t.sequenceNumbers {
|
||||
t.sequenceNumbers[k] = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (t *transactionManager) getProducerID() (int64, int16) {
|
||||
t.mutex.Lock()
|
||||
defer t.mutex.Unlock()
|
||||
return t.producerID, t.producerEpoch
|
||||
}
|
||||
|
||||
func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
|
||||
txnmgr := &transactionManager{
|
||||
producerID: noProducerID,
|
||||
producerEpoch: noProducerEpoch,
|
||||
}
|
||||
|
||||
if conf.Producer.Idempotent {
|
||||
initProducerIDResponse, err := client.InitProducerID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txnmgr.producerID = initProducerIDResponse.ProducerID
|
||||
txnmgr.producerEpoch = initProducerIDResponse.ProducerEpoch
|
||||
txnmgr.sequenceNumbers = make(map[string]int32)
|
||||
txnmgr.mutex = sync.Mutex{}
|
||||
|
||||
Logger.Printf("Obtained a ProducerId: %d and ProducerEpoch: %d\n", txnmgr.producerID, txnmgr.producerEpoch)
|
||||
}
|
||||
|
||||
return txnmgr, nil
|
||||
}
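The transaction manager only obtains a real producer id and epoch when Producer.Idempotent is enabled; it then hands out monotonically increasing per-partition sequence numbers. A configuration sketch follows; the extra preconditions (acks from all in-sync replicas, a single in-flight request, brokers at 0.11 or newer) are taken from Sarama's config validation rather than from this hunk, so treat the exact set as an assumption:

package sketch

import "github.com/Shopify/sarama"

// newIdempotentProducer builds an AsyncProducer with idempotence enabled.
func newIdempotentProducer(brokers []string) (sarama.AsyncProducer, error) {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_11_0_0 // InitProducerID needs 0.11+ brokers
	cfg.Producer.Idempotent = true // ask the broker for a producer id/epoch
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Producer.Retry.Max = 5
	cfg.Net.MaxOpenRequests = 1 // keep ordering guarantees across retries
	cfg.Producer.Return.Successes = true
	return sarama.NewAsyncProducer(brokers, cfg)
}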
|
||||
|
||||
type asyncProducer struct {
|
||||
client Client
|
||||
conf *Config
|
||||
ownClient bool
|
||||
client Client
|
||||
conf *Config
|
||||
|
||||
errors chan *ProducerError
|
||||
input, successes, retries chan *ProducerMessage
|
||||
inFlight sync.WaitGroup
|
||||
|
||||
brokers map[*Broker]chan<- *ProducerMessage
|
||||
brokerRefs map[chan<- *ProducerMessage]int
|
||||
brokers map[*Broker]*brokerProducer
|
||||
brokerRefs map[*brokerProducer]int
|
||||
brokerLock sync.Mutex
|
||||
|
||||
txnmgr *transactionManager
|
||||
}
|
||||
|
||||
// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
|
||||
|
@ -67,23 +127,29 @@ func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p, err := NewAsyncProducerFromClient(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.(*asyncProducer).ownClient = true
|
||||
return p, nil
|
||||
return newAsyncProducer(client)
|
||||
}
|
||||
|
||||
// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
|
||||
// necessary to call Close() on the underlying client when shutting down this producer.
|
||||
func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
|
||||
// For clients passed in by the client, ensure we don't
|
||||
// call Close() on it.
|
||||
cli := &nopCloserClient{client}
|
||||
return newAsyncProducer(cli)
|
||||
}
|
||||
|
||||
func newAsyncProducer(client Client) (AsyncProducer, error) {
|
||||
// Check that we are not dealing with a closed Client before processing any other arguments
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
txnmgr, err := newTransactionManager(client.Config(), client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p := &asyncProducer{
|
||||
client: client,
|
||||
conf: client.Config(),
|
||||
|
@ -91,8 +157,9 @@ func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
|
|||
input: make(chan *ProducerMessage),
|
||||
successes: make(chan *ProducerMessage),
|
||||
retries: make(chan *ProducerMessage),
|
||||
brokers: make(map[*Broker]chan<- *ProducerMessage),
|
||||
brokerRefs: make(map[chan<- *ProducerMessage]int),
|
||||
brokers: make(map[*Broker]*brokerProducer),
|
||||
brokerRefs: make(map[*brokerProducer]int),
|
||||
txnmgr: txnmgr,
|
||||
}
|
||||
|
||||
// launch our singleton dispatchers
|
||||
|
@ -139,15 +206,25 @@ type ProducerMessage struct {
|
|||
// Partition is the partition that the message was sent to. This is only
|
||||
// guaranteed to be defined if the message was successfully delivered.
|
||||
Partition int32
|
||||
// Timestamp is the timestamp assigned to the message by the broker. This
|
||||
// is only guaranteed to be defined if the message was successfully
|
||||
// delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
|
||||
// least version 0.10.0.
|
||||
// Timestamp can vary in behaviour depending on broker configuration, being
|
||||
// in either one of the CreateTime or LogAppendTime modes (default CreateTime),
|
||||
// and requiring version at least 0.10.0.
|
||||
//
|
||||
// When configured to CreateTime, the timestamp is specified by the producer
|
||||
// either by explicitly setting this field, or when the message is added
|
||||
// to a produce set.
|
||||
//
|
||||
// When configured to LogAppendTime, the timestamp is assigned to the message
|
||||
// by the broker. This is only guaranteed to be defined if the message was
|
||||
// successfully delivered and RequiredAcks is not NoResponse.
|
||||
Timestamp time.Time
|
||||
|
||||
retries int
|
||||
flags flagSet
|
||||
expectation chan *ProducerError
|
||||
retries int
|
||||
flags flagSet
|
||||
expectation chan *ProducerError
|
||||
sequenceNumber int32
|
||||
producerEpoch int16
|
||||
hasSequence bool
|
||||
}
|
||||
|
||||
const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
|
||||
|
@ -174,6 +251,9 @@ func (m *ProducerMessage) byteSize(version int) int {
|
|||
func (m *ProducerMessage) clear() {
|
||||
m.flags = 0
|
||||
m.retries = 0
|
||||
m.sequenceNumber = 0
|
||||
m.producerEpoch = 0
|
||||
m.hasSequence = false
|
||||
}
|
||||
|
||||
// ProducerError is the type of error generated when the producer fails to deliver a message.
|
||||
|
@ -187,6 +267,10 @@ func (pe ProducerError) Error() string {
|
|||
return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
|
||||
}
|
||||
|
||||
func (pe ProducerError) Unwrap() error {
|
||||
return pe.Err
|
||||
}
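The new Unwrap method is what makes ProducerError participate in Go 1.13 error unwrapping (the "Support Go 1.13 error unwrapping" changelog entry), so errors.Is and errors.As can reach the underlying KError. A small sketch of consuming the error channel this way; the specific KError checked for is illustrative only:

package sketch

import (
	"errors"
	"log"

	"github.com/Shopify/sarama"
)

// drainErrors reads delivery failures from the async producer's error channel.
func drainErrors(producer sarama.AsyncProducer) {
	for pErr := range producer.Errors() {
		// Unwrap lets errors.Is see the KError wrapped inside *ProducerError.
		if errors.Is(pErr, sarama.ErrMessageSizeTooLarge) {
			log.Printf("dropping oversized message for topic %s", pErr.Msg.Topic)
			continue
		}
		log.Printf("delivery failed: %v", pErr)
	}
}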
|
||||
|
||||
// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
|
||||
// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
|
||||
// when closing a producer.
|
||||
|
@ -268,6 +352,10 @@ func (p *asyncProducer) dispatcher() {
|
|||
p.inFlight.Add(1)
|
||||
}
|
||||
|
||||
for _, interceptor := range p.conf.Producer.Interceptors {
|
||||
msg.safelyApplyInterceptor(interceptor)
|
||||
}
|
||||
|
||||
version := 1
|
||||
if p.conf.Version.IsAtLeast(V0_11_0_0) {
|
||||
version = 2
|
||||
|
@ -347,7 +435,7 @@ func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
|
|||
var partitions []int32
|
||||
|
||||
err := tp.breaker.Run(func() (err error) {
|
||||
var requiresConsistency = false
|
||||
requiresConsistency := false
|
||||
if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok {
|
||||
requiresConsistency = ep.MessageRequiresConsistency(msg)
|
||||
} else {
|
||||
|
@ -394,9 +482,9 @@ type partitionProducer struct {
|
|||
partition int32
|
||||
input <-chan *ProducerMessage
|
||||
|
||||
leader *Broker
|
||||
breaker *breaker.Breaker
|
||||
output chan<- *ProducerMessage
|
||||
leader *Broker
|
||||
breaker *breaker.Breaker
|
||||
brokerProducer *brokerProducer
|
||||
|
||||
// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
|
||||
// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
|
||||
|
@ -426,21 +514,53 @@ func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan
|
|||
return input
|
||||
}
|
||||
|
||||
func (pp *partitionProducer) backoff(retries int) {
|
||||
var backoff time.Duration
|
||||
if pp.parent.conf.Producer.Retry.BackoffFunc != nil {
|
||||
maxRetries := pp.parent.conf.Producer.Retry.Max
|
||||
backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries)
|
||||
} else {
|
||||
backoff = pp.parent.conf.Producer.Retry.Backoff
|
||||
}
|
||||
if backoff > 0 {
|
||||
time.Sleep(backoff)
|
||||
}
|
||||
}
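backoff() prefers a user-supplied Retry.BackoffFunc over the fixed Retry.Backoff, passing the message's retry count and the configured maximum. A sketch of a possible exponential policy; the specific delays and cap are illustrative, not a recommendation from the library:

package sketch

import (
	"time"

	"github.com/Shopify/sarama"
)

// newRetryConfig installs an exponential backoff for produce retries.
func newRetryConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Producer.Retry.Max = 6
	cfg.Producer.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
		backoff := 100 * time.Millisecond << uint(retries) // 100ms, 200ms, 400ms, ...
		if backoff > 5*time.Second {
			backoff = 5 * time.Second // cap the wait so maxRetries attempts stay bounded
		}
		return backoff
	}
	return cfg
}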
|
||||
|
||||
func (pp *partitionProducer) dispatch() {
|
||||
// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
|
||||
// on the first message
|
||||
pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
|
||||
if pp.leader != nil {
|
||||
pp.output = pp.parent.getBrokerProducer(pp.leader)
|
||||
pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
|
||||
pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
|
||||
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
|
||||
pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if pp.brokerProducer != nil {
|
||||
pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
|
||||
}
|
||||
}()
|
||||
|
||||
for msg := range pp.input {
|
||||
if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil {
|
||||
select {
|
||||
case <-pp.brokerProducer.abandoned:
|
||||
// a message on the abandoned channel means that our current broker selection is out of date
|
||||
Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
|
||||
pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
|
||||
pp.brokerProducer = nil
|
||||
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
|
||||
default:
|
||||
// producer connection is still open.
|
||||
}
|
||||
}
|
||||
|
||||
if msg.retries > pp.highWatermark {
|
||||
// a new, higher, retry level; handle it and then back off
|
||||
pp.newHighWatermark(msg.retries)
|
||||
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
|
||||
pp.backoff(msg.retries)
|
||||
} else if pp.highWatermark > 0 {
|
||||
// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
|
||||
if msg.retries < pp.highWatermark {
|
||||
|
@ -465,20 +585,25 @@ func (pp *partitionProducer) dispatch() {
|
|||
// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
|
||||
// without breaking any of our ordering guarantees
|
||||
|
||||
if pp.output == nil {
|
||||
if pp.brokerProducer == nil {
|
||||
if err := pp.updateLeader(); err != nil {
|
||||
pp.parent.returnError(msg, err)
|
||||
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
|
||||
pp.backoff(msg.retries)
|
||||
continue
|
||||
}
|
||||
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
|
||||
}
|
||||
|
||||
pp.output <- msg
|
||||
}
|
||||
// Now that we know we have a broker to actually try and send this message to, generate the sequence
|
||||
// number for it.
|
||||
// All messages being retried (sent or not) have already had their retry count updated
|
||||
// Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer.
|
||||
if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 {
|
||||
msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
|
||||
msg.hasSequence = true
|
||||
}
|
||||
|
||||
if pp.output != nil {
|
||||
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
|
||||
pp.brokerProducer.input <- msg
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -490,12 +615,12 @@ func (pp *partitionProducer) newHighWatermark(hwm int) {
|
|||
// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
|
||||
pp.retryState[pp.highWatermark].expectChaser = true
|
||||
pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
|
||||
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
|
||||
pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
|
||||
|
||||
// a new HWM means that our current broker selection is out of date
|
||||
Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
|
||||
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
|
||||
pp.output = nil
|
||||
pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
|
||||
pp.brokerProducer = nil
|
||||
}
|
||||
|
||||
func (pp *partitionProducer) flushRetryBuffers() {
|
||||
|
@ -503,7 +628,7 @@ func (pp *partitionProducer) flushRetryBuffers() {
|
|||
for {
|
||||
pp.highWatermark--
|
||||
|
||||
if pp.output == nil {
|
||||
if pp.brokerProducer == nil {
|
||||
if err := pp.updateLeader(); err != nil {
|
||||
pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
|
||||
goto flushDone
|
||||
|
@ -512,7 +637,7 @@ func (pp *partitionProducer) flushRetryBuffers() {
|
|||
}
|
||||
|
||||
for _, msg := range pp.retryState[pp.highWatermark].buf {
|
||||
pp.output <- msg
|
||||
pp.brokerProducer.input <- msg
|
||||
}
|
||||
|
||||
flushDone:
|
||||
|
@ -537,16 +662,16 @@ func (pp *partitionProducer) updateLeader() error {
|
|||
return err
|
||||
}
|
||||
|
||||
pp.output = pp.parent.getBrokerProducer(pp.leader)
|
||||
pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
|
||||
pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
|
||||
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
|
||||
pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// one per broker; also constructs an associated flusher
|
||||
func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
|
||||
func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer {
|
||||
var (
|
||||
input = make(chan *ProducerMessage)
|
||||
bridge = make(chan *produceSet)
|
||||
|
@ -559,6 +684,7 @@ func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessag
|
|||
input: input,
|
||||
output: bridge,
|
||||
responses: responses,
|
||||
stopchan: make(chan struct{}),
|
||||
buffer: newProduceSet(p),
|
||||
currentRetries: make(map[string]map[int32]error),
|
||||
}
|
||||
|
@ -580,7 +706,11 @@ func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessag
|
|||
close(responses)
|
||||
})
|
||||
|
||||
return input
|
||||
if p.conf.Producer.Retry.Max <= 0 {
|
||||
bp.abandoned = make(chan struct{})
|
||||
}
|
||||
|
||||
return bp
|
||||
}
|
||||
|
||||
type brokerProducerResponse struct {
|
||||
|
@ -595,9 +725,11 @@ type brokerProducer struct {
|
|||
parent *asyncProducer
|
||||
broker *Broker
|
||||
|
||||
input <-chan *ProducerMessage
|
||||
input chan *ProducerMessage
|
||||
output chan<- *produceSet
|
||||
responses <-chan *brokerProducerResponse
|
||||
abandoned chan struct{}
|
||||
stopchan chan struct{}
|
||||
|
||||
buffer *produceSet
|
||||
timer <-chan time.Time
|
||||
|
@ -613,12 +745,17 @@ func (bp *brokerProducer) run() {
|
|||
|
||||
for {
|
||||
select {
|
||||
case msg := <-bp.input:
|
||||
if msg == nil {
|
||||
case msg, ok := <-bp.input:
|
||||
if !ok {
|
||||
Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID())
|
||||
bp.shutdown()
|
||||
return
|
||||
}
|
||||
|
||||
if msg == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if msg.flags&syn == syn {
|
||||
Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
|
||||
bp.broker.ID(), msg.Topic, msg.Partition)
|
||||
|
@ -644,12 +781,21 @@ func (bp *brokerProducer) run() {
|
|||
}
|
||||
|
||||
if bp.buffer.wouldOverflow(msg) {
|
||||
if err := bp.waitForSpace(msg); err != nil {
|
||||
Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
|
||||
if err := bp.waitForSpace(msg, false); err != nil {
|
||||
bp.parent.retryMessage(msg, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch {
|
||||
// The epoch was reset, need to roll the buffer over
|
||||
Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID())
|
||||
if err := bp.waitForSpace(msg, true); err != nil {
|
||||
bp.parent.retryMessage(msg, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if err := bp.buffer.add(msg); err != nil {
|
||||
bp.parent.returnError(msg, err)
|
||||
continue
|
||||
|
@ -662,8 +808,14 @@ func (bp *brokerProducer) run() {
|
|||
bp.timerFired = true
|
||||
case output <- bp.buffer:
|
||||
bp.rollOver()
|
||||
case response := <-bp.responses:
|
||||
bp.handleResponse(response)
|
||||
case response, ok := <-bp.responses:
|
||||
if ok {
|
||||
bp.handleResponse(response)
|
||||
}
|
||||
case <-bp.stopchan:
|
||||
Logger.Printf(
|
||||
"producer/broker/%d run loop asked to stop\n", bp.broker.ID())
|
||||
return
|
||||
}
|
||||
|
||||
if bp.timerFired || bp.buffer.readyToFlush() {
|
||||
|
@ -687,7 +839,7 @@ func (bp *brokerProducer) shutdown() {
|
|||
for response := range bp.responses {
|
||||
bp.handleResponse(response)
|
||||
}
|
||||
|
||||
close(bp.stopchan)
|
||||
Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
|
||||
}
|
||||
|
||||
|
@ -699,9 +851,7 @@ func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
|
|||
return bp.currentRetries[msg.Topic][msg.Partition]
|
||||
}
|
||||
|
||||
func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
|
||||
Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
|
||||
|
||||
func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error {
|
||||
for {
|
||||
select {
|
||||
case response := <-bp.responses:
|
||||
|
@ -709,7 +859,7 @@ func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
|
|||
// handling a response can change our state, so re-check some things
|
||||
if reason := bp.needsRetry(msg); reason != nil {
|
||||
return reason
|
||||
} else if !bp.buffer.wouldOverflow(msg) {
|
||||
} else if !bp.buffer.wouldOverflow(msg) && !forceRollover {
|
||||
return nil
|
||||
}
|
||||
case bp.output <- bp.buffer:
|
||||
|
@ -740,16 +890,17 @@ func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
|
|||
func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
|
||||
// we iterate through the blocks in the request set, not the response, so that we notice
|
||||
// if the response is missing a block completely
|
||||
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
|
||||
var retryTopics []string
|
||||
sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
|
||||
if response == nil {
|
||||
// this only happens when RequiredAcks is NoResponse, so we have to assume success
|
||||
bp.parent.returnSuccesses(msgs)
|
||||
bp.parent.returnSuccesses(pSet.msgs)
|
||||
return
|
||||
}
|
||||
|
||||
block := response.GetBlock(topic, partition)
|
||||
if block == nil {
|
||||
bp.parent.returnErrors(msgs, ErrIncompleteResponse)
|
||||
bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -757,45 +908,115 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo
|
|||
// Success
|
||||
case ErrNoError:
|
||||
if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
|
||||
for _, msg := range msgs {
|
||||
for _, msg := range pSet.msgs {
|
||||
msg.Timestamp = block.Timestamp
|
||||
}
|
||||
}
|
||||
for i, msg := range msgs {
|
||||
for i, msg := range pSet.msgs {
|
||||
msg.Offset = block.Offset + int64(i)
|
||||
}
|
||||
bp.parent.returnSuccesses(msgs)
|
||||
bp.parent.returnSuccesses(pSet.msgs)
|
||||
// Duplicate
|
||||
case ErrDuplicateSequenceNumber:
|
||||
bp.parent.returnSuccesses(pSet.msgs)
|
||||
// Retriable errors
|
||||
case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
|
||||
ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
|
||||
Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
|
||||
bp.broker.ID(), topic, partition, block.Err)
|
||||
bp.currentRetries[topic][partition] = block.Err
|
||||
bp.parent.retryMessages(msgs, block.Err)
|
||||
bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
|
||||
if bp.parent.conf.Producer.Retry.Max <= 0 {
|
||||
bp.parent.abandonBrokerConnection(bp.broker)
|
||||
bp.parent.returnErrors(pSet.msgs, block.Err)
|
||||
} else {
|
||||
retryTopics = append(retryTopics, topic)
|
||||
}
|
||||
// Other non-retriable errors
|
||||
default:
|
||||
bp.parent.returnErrors(msgs, block.Err)
|
||||
if bp.parent.conf.Producer.Retry.Max <= 0 {
|
||||
bp.parent.abandonBrokerConnection(bp.broker)
|
||||
}
|
||||
bp.parent.returnErrors(pSet.msgs, block.Err)
|
||||
}
|
||||
})
|
||||
|
||||
if len(retryTopics) > 0 {
|
||||
if bp.parent.conf.Producer.Idempotent {
|
||||
err := bp.parent.client.RefreshMetadata(retryTopics...)
|
||||
if err != nil {
|
||||
Logger.Printf("Failed refreshing metadata because of %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
|
||||
block := response.GetBlock(topic, partition)
|
||||
if block == nil {
|
||||
// handled in the previous "eachPartition" loop
|
||||
return
|
||||
}
|
||||
|
||||
switch block.Err {
|
||||
case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
|
||||
ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
|
||||
Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
|
||||
bp.broker.ID(), topic, partition, block.Err)
|
||||
if bp.currentRetries[topic] == nil {
|
||||
bp.currentRetries[topic] = make(map[int32]error)
|
||||
}
|
||||
bp.currentRetries[topic][partition] = block.Err
|
||||
if bp.parent.conf.Producer.Idempotent {
|
||||
go bp.parent.retryBatch(topic, partition, pSet, block.Err)
|
||||
} else {
|
||||
bp.parent.retryMessages(pSet.msgs, block.Err)
|
||||
}
|
||||
// dropping the following messages has the side effect of incrementing their retry count
|
||||
bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) {
|
||||
Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr)
|
||||
produceSet := newProduceSet(p)
|
||||
produceSet.msgs[topic] = make(map[int32]*partitionSet)
|
||||
produceSet.msgs[topic][partition] = pSet
|
||||
produceSet.bufferBytes += pSet.bufferBytes
|
||||
produceSet.bufferCount += len(pSet.msgs)
|
||||
for _, msg := range pSet.msgs {
|
||||
if msg.retries >= p.conf.Producer.Retry.Max {
|
||||
p.returnError(msg, kerr)
|
||||
return
|
||||
}
|
||||
msg.retries++
|
||||
}
|
||||
|
||||
// it's expected that a metadata refresh has been requested prior to calling retryBatch
|
||||
leader, err := p.client.Leader(topic, partition)
|
||||
if err != nil {
|
||||
Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up for new leader\n", topic, partition, err)
|
||||
for _, msg := range pSet.msgs {
|
||||
p.returnError(msg, kerr)
|
||||
}
|
||||
return
|
||||
}
|
||||
bp := p.getBrokerProducer(leader)
|
||||
bp.output <- produceSet
|
||||
}
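The retryBatch path above only runs for idempotent producers (the non-idempotent case falls back to retryMessages). As a sketch of the kind of configuration assumed to exercise it — the version, ack level and retry count below are illustrative, not taken from this diff:

package example

import "github.com/Shopify/sarama"

// newIdempotentConfig builds a producer config that enables the idempotent
// code path handled by retryBatch. All concrete values are assumptions.
func newIdempotentConfig() *sarama.Config {
	config := sarama.NewConfig()
	config.Version = sarama.V0_11_0_0 // idempotent producing requires Kafka >= 0.11
	config.Producer.Idempotent = true // turns on producer IDs and sequence numbers
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Retry.Max = 5 // retryBatch gives up once a message has been retried this many times
	config.Net.MaxOpenRequests = 1 // sarama requires a single in-flight request when Idempotent is set
	return config
}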
|
||||
|
||||
func (bp *brokerProducer) handleError(sent *produceSet, err error) {
|
||||
switch err.(type) {
|
||||
case PacketEncodingError:
|
||||
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
|
||||
bp.parent.returnErrors(msgs, err)
|
||||
sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
|
||||
bp.parent.returnErrors(pSet.msgs, err)
|
||||
})
|
||||
default:
|
||||
Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
|
||||
bp.parent.abandonBrokerConnection(bp.broker)
|
||||
_ = bp.broker.Close()
|
||||
bp.closing = err
|
||||
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
|
||||
bp.parent.retryMessages(msgs, err)
|
||||
sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
|
||||
bp.parent.retryMessages(pSet.msgs, err)
|
||||
})
|
||||
bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
|
||||
bp.parent.retryMessages(msgs, err)
|
||||
bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
|
||||
bp.parent.retryMessages(pSet.msgs, err)
|
||||
})
|
||||
bp.rollOver()
|
||||
}
|
||||
|
@ -837,11 +1058,9 @@ func (p *asyncProducer) shutdown() {
|
|||
|
||||
p.inFlight.Wait()
|
||||
|
||||
if p.ownClient {
|
||||
err := p.client.Close()
|
||||
if err != nil {
|
||||
Logger.Println("producer/shutdown failed to close the embedded client:", err)
|
||||
}
|
||||
err := p.client.Close()
|
||||
if err != nil {
|
||||
Logger.Println("producer/shutdown failed to close the embedded client:", err)
|
||||
}
|
||||
|
||||
close(p.input)
|
||||
|
@ -851,6 +1070,12 @@ func (p *asyncProducer) shutdown() {
|
|||
}
|
||||
|
||||
func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
|
||||
// We need to reset the producer ID epoch if we set a sequence number on it, because the broker
|
||||
// will never see a message with this number, so we can never continue the sequence.
|
||||
if msg.hasSequence {
|
||||
Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition)
|
||||
p.txnmgr.bumpEpoch()
|
||||
}
|
||||
msg.clear()
|
||||
pErr := &ProducerError{Msg: msg, Err: err}
|
||||
if p.conf.Producer.Return.Errors {
|
||||
|
@ -892,7 +1117,7 @@ func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
|
|||
}
|
||||
}
|
||||
|
||||
func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
|
||||
func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer {
|
||||
p.brokerLock.Lock()
|
||||
defer p.brokerLock.Unlock()
|
||||
|
||||
|
@ -909,13 +1134,13 @@ func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessag
|
|||
return bp
|
||||
}
|
||||
|
||||
func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
|
||||
func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) {
|
||||
p.brokerLock.Lock()
|
||||
defer p.brokerLock.Unlock()
|
||||
|
||||
p.brokerRefs[bp]--
|
||||
if p.brokerRefs[bp] == 0 {
|
||||
close(bp)
|
||||
close(bp.input)
|
||||
delete(p.brokerRefs, bp)
|
||||
|
||||
if p.brokers[broker] == bp {
|
||||
|
@ -928,5 +1153,10 @@ func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
|
|||
p.brokerLock.Lock()
|
||||
defer p.brokerLock.Unlock()
|
||||
|
||||
bc, ok := p.brokers[broker]
|
||||
if ok && bc.abandoned != nil {
|
||||
close(bc.abandoned)
|
||||
}
|
||||
|
||||
delete(p.brokers, broker)
|
||||
}
|
||||
|
|
|
@ -1,8 +1,23 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// RangeBalanceStrategyName identifies strategies that use the range partition assignment strategy
|
||||
RangeBalanceStrategyName = "range"
|
||||
|
||||
// RoundRobinBalanceStrategyName identifies strategies that use the round-robin partition assignment strategy
|
||||
RoundRobinBalanceStrategyName = "roundrobin"
|
||||
|
||||
// StickyBalanceStrategyName identifies strategies that use the sticky-partition assignment strategy
|
||||
StickyBalanceStrategyName = "sticky"
|
||||
|
||||
defaultGeneration = -1
|
||||
)
|
||||
|
||||
// BalanceStrategyPlan is the results of any BalanceStrategy.Plan attempt.
|
||||
|
@ -24,7 +39,7 @@ func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) {
|
|||
// --------------------------------------------------------------------
|
||||
|
||||
// BalanceStrategy is used to balance topics and partitions
|
||||
// across memebers of a consumer group
|
||||
// across members of a consumer group
|
||||
type BalanceStrategy interface {
|
||||
// Name uniquely identifies the strategy.
|
||||
Name() string
|
||||
|
@ -32,6 +47,10 @@ type BalanceStrategy interface {
|
|||
// Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions`
|
||||
// and returns a distribution plan.
|
||||
Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error)
|
||||
|
||||
// AssignmentData returns the serialized assignment data for the specified
|
||||
// memberID
|
||||
AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error)
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------
|
||||
|
@ -41,7 +60,7 @@ type BalanceStrategy interface {
|
|||
// M1: {T: [0, 1, 2]}
|
||||
// M2: {T: [3, 4, 5]}
|
||||
var BalanceStrategyRange = &balanceStrategy{
|
||||
name: "range",
|
||||
name: RangeBalanceStrategyName,
|
||||
coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
|
||||
step := float64(len(partitions)) / float64(len(memberIDs))
|
||||
|
||||
|
@ -59,7 +78,7 @@ var BalanceStrategyRange = &balanceStrategy{
|
|||
// M1: {T: [0, 2, 4]}
|
||||
// M2: {T: [1, 3, 5]}
|
||||
var BalanceStrategyRoundRobin = &balanceStrategy{
|
||||
name: "roundrobin",
|
||||
name: RoundRobinBalanceStrategyName,
|
||||
coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
|
||||
for i, part := range partitions {
|
||||
memberID := memberIDs[i%len(memberIDs)]
|
||||
|
@ -68,6 +87,19 @@ var BalanceStrategyRoundRobin = &balanceStrategy{
|
|||
},
|
||||
}
|
||||
|
||||
// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments
|
||||
// while maintaining a balanced partition distribution.
|
||||
// Example with topic T with six partitions (0..5) and two members (M1, M2):
|
||||
// M1: {T: [0, 2, 4]}
|
||||
// M2: {T: [1, 3, 5]}
|
||||
//
|
||||
// On reassignment with an additional consumer, you might get an assignment plan like:
|
||||
// M1: {T: [0, 2]}
|
||||
// M2: {T: [1, 3]}
|
||||
// M3: {T: [4, 5]}
|
||||
//
|
||||
var BalanceStrategySticky = &stickyBalanceStrategy{}
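BalanceStrategySticky is typically selected through the consumer group configuration rather than used directly. A minimal sketch, assuming the standard Shopify/sarama import path and an arbitrary Kafka version:

package example

import "github.com/Shopify/sarama"

// newStickyGroupConfig returns a config whose consumer group rebalancing uses
// the sticky strategy defined above.
func newStickyGroupConfig() *sarama.Config {
	config := sarama.NewConfig()
	config.Version = sarama.V2_0_0_0
	config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky
	return config
}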
|
||||
|
||||
// --------------------------------------------------------------------
|
||||
|
||||
type balanceStrategy struct {
|
||||
|
@ -78,7 +110,7 @@ type balanceStrategy struct {
|
|||
// Name implements BalanceStrategy.
|
||||
func (s *balanceStrategy) Name() string { return s.name }
|
||||
|
||||
// Balance implements BalanceStrategy.
|
||||
// Plan implements BalanceStrategy.
|
||||
func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
|
||||
// Build members by topic map
|
||||
mbt := make(map[string][]string)
|
||||
|
@ -104,6 +136,11 @@ func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, t
|
|||
return plan, nil
|
||||
}
|
||||
|
||||
// AssignmentData returns nil; simple strategies do not require any shared assignment data
|
||||
func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type balanceStrategySortable struct {
|
||||
topic string
|
||||
memberIDs []string
|
||||
|
@ -127,3 +164,898 @@ func balanceStrategyHashValue(vv ...string) uint32 {
|
|||
}
|
||||
return h
|
||||
}
|
||||
|
||||
type stickyBalanceStrategy struct {
|
||||
movements partitionMovements
|
||||
}
|
||||
|
||||
// Name implements BalanceStrategy.
|
||||
func (s *stickyBalanceStrategy) Name() string { return StickyBalanceStrategyName }
|
||||
|
||||
// Plan implements BalanceStrategy.
|
||||
func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
|
||||
// track partition movements during generation of the partition assignment plan
|
||||
s.movements = partitionMovements{
|
||||
Movements: make(map[topicPartitionAssignment]consumerPair),
|
||||
PartitionMovementsByTopic: make(map[string]map[consumerPair]map[topicPartitionAssignment]bool),
|
||||
}
|
||||
|
||||
// prepopulate the current assignment state from userdata on the consumer group members
|
||||
currentAssignment, prevAssignment, err := prepopulateCurrentAssignments(members)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// determine if we're dealing with a completely fresh assignment, or if there's existing assignment state
|
||||
isFreshAssignment := false
|
||||
if len(currentAssignment) == 0 {
|
||||
isFreshAssignment = true
|
||||
}
|
||||
|
||||
// create a mapping of all current topic partitions and the consumers that can be assigned to them
|
||||
partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string)
|
||||
for topic, partitions := range topics {
|
||||
for _, partition := range partitions {
|
||||
partition2AllPotentialConsumers[topicPartitionAssignment{Topic: topic, Partition: partition}] = []string{}
|
||||
}
|
||||
}
|
||||
|
||||
// create a mapping of all consumers to all potential topic partitions that can be assigned to them
|
||||
// also, populate the mapping of partitions to potential consumers
|
||||
consumer2AllPotentialPartitions := make(map[string][]topicPartitionAssignment, len(members))
|
||||
for memberID, meta := range members {
|
||||
consumer2AllPotentialPartitions[memberID] = make([]topicPartitionAssignment, 0)
|
||||
for _, topicSubscription := range meta.Topics {
|
||||
// only evaluate topic subscriptions that are present in the supplied topics map
|
||||
if _, found := topics[topicSubscription]; found {
|
||||
for _, partition := range topics[topicSubscription] {
|
||||
topicPartition := topicPartitionAssignment{Topic: topicSubscription, Partition: partition}
|
||||
consumer2AllPotentialPartitions[memberID] = append(consumer2AllPotentialPartitions[memberID], topicPartition)
|
||||
partition2AllPotentialConsumers[topicPartition] = append(partition2AllPotentialConsumers[topicPartition], memberID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist
|
||||
if _, exists := currentAssignment[memberID]; !exists {
|
||||
currentAssignment[memberID] = make([]topicPartitionAssignment, 0)
|
||||
}
|
||||
}
|
||||
|
||||
// create a mapping of each partition to its current consumer, where possible
|
||||
currentPartitionConsumers := make(map[topicPartitionAssignment]string, len(currentAssignment))
|
||||
unvisitedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
|
||||
for partition := range partition2AllPotentialConsumers {
|
||||
unvisitedPartitions[partition] = true
|
||||
}
|
||||
var unassignedPartitions []topicPartitionAssignment
|
||||
for memberID, partitions := range currentAssignment {
|
||||
var keepPartitions []topicPartitionAssignment
|
||||
for _, partition := range partitions {
|
||||
// If this partition no longer exists at all, likely due to the
|
||||
// topic being deleted, we remove the partition from the member.
|
||||
if _, exists := partition2AllPotentialConsumers[partition]; !exists {
|
||||
continue
|
||||
}
|
||||
delete(unvisitedPartitions, partition)
|
||||
currentPartitionConsumers[partition] = memberID
|
||||
|
||||
if !strsContains(members[memberID].Topics, partition.Topic) {
|
||||
unassignedPartitions = append(unassignedPartitions, partition)
|
||||
continue
|
||||
}
|
||||
keepPartitions = append(keepPartitions, partition)
|
||||
}
|
||||
currentAssignment[memberID] = keepPartitions
|
||||
}
|
||||
for unvisited := range unvisitedPartitions {
|
||||
unassignedPartitions = append(unassignedPartitions, unvisited)
|
||||
}
|
||||
|
||||
// sort the topic partitions in order of priority for reassignment
|
||||
sortedPartitions := sortPartitions(currentAssignment, prevAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions)
|
||||
|
||||
// at this point we have preserved all valid topic partition to consumer assignments and removed
|
||||
// all invalid topic partitions and invalid consumers. Now we need to assign unassignedPartitions
|
||||
// to consumers so that the topic partition assignments are as balanced as possible.
|
||||
|
||||
// an ascending sorted set of consumers based on how many topic partitions are already assigned to them
|
||||
sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
|
||||
s.balance(currentAssignment, prevAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumers)
|
||||
|
||||
// Assemble plan
|
||||
plan := make(BalanceStrategyPlan, len(currentAssignment))
|
||||
for memberID, assignments := range currentAssignment {
|
||||
if len(assignments) == 0 {
|
||||
plan[memberID] = make(map[string][]int32)
|
||||
} else {
|
||||
for _, assignment := range assignments {
|
||||
plan.Add(memberID, assignment.Topic, assignment.Partition)
|
||||
}
|
||||
}
|
||||
}
|
||||
return plan, nil
|
||||
}
|
||||
|
||||
// AssignmentData serializes the set of topics currently assigned to the
|
||||
// specified member as part of the supplied balance plan
|
||||
func (s *stickyBalanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
|
||||
return encode(&StickyAssignorUserDataV1{
|
||||
Topics: topics,
|
||||
Generation: generationID,
|
||||
}, nil)
|
||||
}
|
||||
|
||||
func strsContains(s []string, value string) bool {
|
||||
for _, entry := range s {
|
||||
if entry == value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Balance assignments across consumers for maximum fairness and stickiness.
|
||||
func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) {
|
||||
initializing := false
|
||||
if len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 {
|
||||
initializing = true
|
||||
}
|
||||
|
||||
// assign all unassigned partitions
|
||||
for _, partition := range unassignedPartitions {
|
||||
// skip if there is no potential consumer for the partition
|
||||
if len(partition2AllPotentialConsumers[partition]) == 0 {
|
||||
continue
|
||||
}
|
||||
sortedCurrentSubscriptions = assignPartition(partition, sortedCurrentSubscriptions, currentAssignment, consumer2AllPotentialPartitions, currentPartitionConsumer)
|
||||
}
|
||||
|
||||
// narrow down the reassignment scope to only those partitions that can actually be reassigned
|
||||
for partition := range partition2AllPotentialConsumers {
|
||||
if !canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
|
||||
sortedPartitions = removeTopicPartitionFromMemberAssignments(sortedPartitions, partition)
|
||||
}
|
||||
}
|
||||
|
||||
// narrow down the reassignment scope to only those consumers that are subject to reassignment
|
||||
fixedAssignments := make(map[string][]topicPartitionAssignment)
|
||||
for memberID := range consumer2AllPotentialPartitions {
|
||||
if !canConsumerParticipateInReassignment(memberID, currentAssignment, consumer2AllPotentialPartitions, partition2AllPotentialConsumers) {
|
||||
fixedAssignments[memberID] = currentAssignment[memberID]
|
||||
delete(currentAssignment, memberID)
|
||||
sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment)
|
||||
}
|
||||
}
|
||||
|
||||
// create a deep copy of the current assignment so we can revert to it if we do not get a more balanced assignment later
|
||||
preBalanceAssignment := deepCopyAssignment(currentAssignment)
|
||||
preBalancePartitionConsumers := make(map[topicPartitionAssignment]string, len(currentPartitionConsumer))
|
||||
for k, v := range currentPartitionConsumer {
|
||||
preBalancePartitionConsumers[k] = v
|
||||
}
|
||||
|
||||
reassignmentPerformed := s.performReassignments(sortedPartitions, currentAssignment, prevAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer)
|
||||
|
||||
// if we are not preserving existing assignments and we have made changes to the current assignment
|
||||
// make sure we are getting a more balanced assignment; otherwise, revert to previous assignment
|
||||
if !initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment) {
|
||||
currentAssignment = deepCopyAssignment(preBalanceAssignment)
|
||||
currentPartitionConsumer = make(map[topicPartitionAssignment]string, len(preBalancePartitionConsumers))
|
||||
for k, v := range preBalancePartitionConsumers {
|
||||
currentPartitionConsumer[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
// add the fixed assignments (those that could not change) back
|
||||
for consumer, assignments := range fixedAssignments {
|
||||
currentAssignment[consumer] = assignments
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate the balance score of the given assignment, as the sum of the differences in assigned partition counts over all consumer pairs.
|
||||
// A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0.
|
||||
// Lower balance score indicates a more balanced assignment.
|
||||
func getBalanceScore(assignment map[string][]topicPartitionAssignment) int {
|
||||
consumer2AssignmentSize := make(map[string]int, len(assignment))
|
||||
for memberID, partitions := range assignment {
|
||||
consumer2AssignmentSize[memberID] = len(partitions)
|
||||
}
|
||||
|
||||
var score float64
|
||||
for memberID, consumerAssignmentSize := range consumer2AssignmentSize {
|
||||
delete(consumer2AssignmentSize, memberID)
|
||||
for _, otherConsumerAssignmentSize := range consumer2AssignmentSize {
|
||||
score += math.Abs(float64(consumerAssignmentSize - otherConsumerAssignmentSize))
|
||||
}
|
||||
}
|
||||
return int(score)
|
||||
}
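A brief worked example of this score (illustrative, not part of the diff): with assignment sizes {A: 3, B: 1, C: 2} the pairwise differences are |3-1| + |3-2| + |1-2| = 4, while a perfectly even assignment such as {2, 2, 2} scores 0.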
|
||||
|
||||
// Determine whether the current assignment plan is balanced.
|
||||
func isBalanced(currentAssignment map[string][]topicPartitionAssignment, allSubscriptions map[string][]topicPartitionAssignment) bool {
|
||||
sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
|
||||
min := len(currentAssignment[sortedCurrentSubscriptions[0]])
|
||||
max := len(currentAssignment[sortedCurrentSubscriptions[len(sortedCurrentSubscriptions)-1]])
|
||||
if min >= max-1 {
|
||||
// if minimum and maximum numbers of partitions assigned to consumers differ by at most one return true
|
||||
return true
|
||||
}
|
||||
|
||||
// create a mapping from partitions to the consumer assigned to them
|
||||
allPartitions := make(map[topicPartitionAssignment]string)
|
||||
for memberID, partitions := range currentAssignment {
|
||||
for _, partition := range partitions {
|
||||
if _, exists := allPartitions[partition]; exists {
|
||||
Logger.Printf("Topic %s Partition %d is assigned more than one consumer", partition.Topic, partition.Partition)
|
||||
}
|
||||
allPartitions[partition] = memberID
|
||||
}
|
||||
}
|
||||
|
||||
// for each consumer that does not have all the topic partitions it can get, make sure none of the topic partitions it
|
||||
// could (but did not) get can be moved to it (because that would break the balance)
|
||||
for _, memberID := range sortedCurrentSubscriptions {
|
||||
consumerPartitions := currentAssignment[memberID]
|
||||
consumerPartitionCount := len(consumerPartitions)
|
||||
|
||||
// skip if this consumer already has all the topic partitions it can get
|
||||
if consumerPartitionCount == len(allSubscriptions[memberID]) {
|
||||
continue
|
||||
}
|
||||
|
||||
// otherwise make sure it cannot get any more
|
||||
potentialTopicPartitions := allSubscriptions[memberID]
|
||||
for _, partition := range potentialTopicPartitions {
|
||||
if !memberAssignmentsIncludeTopicPartition(currentAssignment[memberID], partition) {
|
||||
otherConsumer := allPartitions[partition]
|
||||
otherConsumerPartitionCount := len(currentAssignment[otherConsumer])
|
||||
if consumerPartitionCount < otherConsumerPartitionCount {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Reassign all topic partitions that need reassignment until balanced.
|
||||
func (s *stickyBalanceStrategy) performReassignments(reassignablePartitions []topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) bool {
|
||||
reassignmentPerformed := false
|
||||
modified := false
|
||||
|
||||
// repeat reassignment until no partition can be moved to improve the balance
|
||||
for {
|
||||
modified = false
|
||||
// reassign all reassignable partitions (starting from the partition with least potential consumers and if needed)
|
||||
// until the full list is processed or a balance is achieved
|
||||
for _, partition := range reassignablePartitions {
|
||||
if isBalanced(currentAssignment, consumer2AllPotentialPartitions) {
|
||||
break
|
||||
}
|
||||
|
||||
// the partition must have at least two consumers
|
||||
if len(partition2AllPotentialConsumers[partition]) <= 1 {
|
||||
Logger.Printf("Expected more than one potential consumer for partition %s topic %d", partition.Topic, partition.Partition)
|
||||
}
|
||||
|
||||
// the partition must have a consumer
|
||||
consumer := currentPartitionConsumer[partition]
|
||||
if consumer == "" {
|
||||
Logger.Printf("Expected topic %s partition %d to be assigned to a consumer", partition.Topic, partition.Partition)
|
||||
}
|
||||
|
||||
if _, exists := prevAssignment[partition]; exists {
|
||||
if len(currentAssignment[consumer]) > (len(currentAssignment[prevAssignment[partition].MemberID]) + 1) {
|
||||
sortedCurrentSubscriptions = s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, prevAssignment[partition].MemberID)
|
||||
reassignmentPerformed = true
|
||||
modified = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// check if a better-suited consumer exists for the partition; if so, reassign it
|
||||
for _, otherConsumer := range partition2AllPotentialConsumers[partition] {
|
||||
if len(currentAssignment[consumer]) > (len(currentAssignment[otherConsumer]) + 1) {
|
||||
sortedCurrentSubscriptions = s.reassignPartitionToNewConsumer(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, consumer2AllPotentialPartitions)
|
||||
reassignmentPerformed = true
|
||||
modified = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !modified {
|
||||
return reassignmentPerformed
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Identify a new consumer for a topic partition and reassign it.
|
||||
func (s *stickyBalanceStrategy) reassignPartitionToNewConsumer(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []string {
|
||||
for _, anotherConsumer := range sortedCurrentSubscriptions {
|
||||
if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[anotherConsumer], partition) {
|
||||
return s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, anotherConsumer)
|
||||
}
|
||||
}
|
||||
return sortedCurrentSubscriptions
|
||||
}
|
||||
|
||||
// Reassign a specific partition to a new consumer
|
||||
func (s *stickyBalanceStrategy) reassignPartition(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, newConsumer string) []string {
|
||||
consumer := currentPartitionConsumer[partition]
|
||||
// find the correct partition movement considering the stickiness requirement
|
||||
partitionToBeMoved := s.movements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer)
|
||||
return s.processPartitionMovement(partitionToBeMoved, newConsumer, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer)
|
||||
}
|
||||
|
||||
// Track the movement of a topic partition after assignment
|
||||
func (s *stickyBalanceStrategy) processPartitionMovement(partition topicPartitionAssignment, newConsumer string, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
|
||||
oldConsumer := currentPartitionConsumer[partition]
|
||||
s.movements.movePartition(partition, oldConsumer, newConsumer)
|
||||
|
||||
currentAssignment[oldConsumer] = removeTopicPartitionFromMemberAssignments(currentAssignment[oldConsumer], partition)
|
||||
currentAssignment[newConsumer] = append(currentAssignment[newConsumer], partition)
|
||||
currentPartitionConsumer[partition] = newConsumer
|
||||
return sortMemberIDsByPartitionAssignments(currentAssignment)
|
||||
}
|
||||
|
||||
// Determine whether a specific consumer should be considered for topic partition assignment.
|
||||
func canConsumerParticipateInReassignment(memberID string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
|
||||
currentPartitions := currentAssignment[memberID]
|
||||
currentAssignmentSize := len(currentPartitions)
|
||||
maxAssignmentSize := len(consumer2AllPotentialPartitions[memberID])
|
||||
if currentAssignmentSize > maxAssignmentSize {
|
||||
Logger.Printf("The consumer %s is assigned more partitions than the maximum possible", memberID)
|
||||
}
|
||||
if currentAssignmentSize < maxAssignmentSize {
|
||||
// if a consumer is not assigned all its potential partitions it is subject to reassignment
|
||||
return true
|
||||
}
|
||||
for _, partition := range currentPartitions {
|
||||
if canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Only consider reassigning those topic partitions that have two or more potential consumers.
|
||||
func canTopicPartitionParticipateInReassignment(partition topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
|
||||
return len(partition2AllPotentialConsumers[partition]) >= 2
|
||||
}
|
||||
|
||||
// The assignment should improve the overall balance of the partition assignments to consumers.
|
||||
func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscriptions []string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
|
||||
for _, memberID := range sortedCurrentSubscriptions {
|
||||
if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[memberID], partition) {
|
||||
currentAssignment[memberID] = append(currentAssignment[memberID], partition)
|
||||
currentPartitionConsumer[partition] = memberID
|
||||
break
|
||||
}
|
||||
}
|
||||
return sortMemberIDsByPartitionAssignments(currentAssignment)
|
||||
}
|
||||
|
||||
// Deserialize topic partition assignment data to aid with creation of a sticky assignment.
|
||||
func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) {
|
||||
userDataV1 := &StickyAssignorUserDataV1{}
|
||||
if err := decode(userDataBytes, userDataV1); err != nil {
|
||||
userDataV0 := &StickyAssignorUserDataV0{}
|
||||
if err := decode(userDataBytes, userDataV0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return userDataV0, nil
|
||||
}
|
||||
return userDataV1, nil
|
||||
}
|
||||
|
||||
// filterAssignedPartitions returns a map of consumer group members to their list of previously-assigned topic partitions, limited
|
||||
// to those topic partitions currently reported by the Kafka cluster.
|
||||
func filterAssignedPartitions(currentAssignment map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) map[string][]topicPartitionAssignment {
|
||||
assignments := deepCopyAssignment(currentAssignment)
|
||||
for memberID, partitions := range assignments {
|
||||
// perform in-place filtering
|
||||
i := 0
|
||||
for _, partition := range partitions {
|
||||
if _, exists := partition2AllPotentialConsumers[partition]; exists {
|
||||
partitions[i] = partition
|
||||
i++
|
||||
}
|
||||
}
|
||||
assignments[memberID] = partitions[:i]
|
||||
}
|
||||
return assignments
|
||||
}
|
||||
|
||||
func removeTopicPartitionFromMemberAssignments(assignments []topicPartitionAssignment, topic topicPartitionAssignment) []topicPartitionAssignment {
|
||||
for i, assignment := range assignments {
|
||||
if assignment == topic {
|
||||
return append(assignments[:i], assignments[i+1:]...)
|
||||
}
|
||||
}
|
||||
return assignments
|
||||
}
|
||||
|
||||
func memberAssignmentsIncludeTopicPartition(assignments []topicPartitionAssignment, topic topicPartitionAssignment) bool {
|
||||
for _, assignment := range assignments {
|
||||
if assignment == topic {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair, isFreshAssignment bool, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []topicPartitionAssignment {
|
||||
unassignedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
|
||||
for partition := range partition2AllPotentialConsumers {
|
||||
unassignedPartitions[partition] = true
|
||||
}
|
||||
|
||||
sortedPartitions := make([]topicPartitionAssignment, 0)
|
||||
if !isFreshAssignment && areSubscriptionsIdentical(partition2AllPotentialConsumers, consumer2AllPotentialPartitions) {
|
||||
// if this is a reassignment and the subscriptions are identical (all consumers can consume from all topics)
|
||||
// then we just need to simply list partitions in a round robin fashion (from consumers with
|
||||
// most assigned partitions to those with least)
|
||||
assignments := filterAssignedPartitions(currentAssignment, partition2AllPotentialConsumers)
|
||||
|
||||
// use priority-queue to evaluate consumer group members in descending-order based on
|
||||
// the number of topic partition assignments (i.e. consumers with most assignments first)
|
||||
pq := make(assignmentPriorityQueue, len(assignments))
|
||||
i := 0
|
||||
for consumerID, consumerAssignments := range assignments {
|
||||
pq[i] = &consumerGroupMember{
|
||||
id: consumerID,
|
||||
assignments: consumerAssignments,
|
||||
}
|
||||
i++
|
||||
}
|
||||
heap.Init(&pq)
|
||||
|
||||
for {
|
||||
// loop until no consumer-group members remain
|
||||
if pq.Len() == 0 {
|
||||
break
|
||||
}
|
||||
member := pq[0]
|
||||
|
||||
// partitions that were assigned to a different consumer last time
|
||||
var prevPartitionIndex int
|
||||
for i, partition := range member.assignments {
|
||||
if _, exists := partitionsWithADifferentPreviousAssignment[partition]; exists {
|
||||
prevPartitionIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(member.assignments) > 0 {
|
||||
partition := member.assignments[prevPartitionIndex]
|
||||
sortedPartitions = append(sortedPartitions, partition)
|
||||
delete(unassignedPartitions, partition)
|
||||
if prevPartitionIndex == 0 {
|
||||
member.assignments = member.assignments[1:]
|
||||
} else {
|
||||
member.assignments = append(member.assignments[:prevPartitionIndex], member.assignments[prevPartitionIndex+1:]...)
|
||||
}
|
||||
heap.Fix(&pq, 0)
|
||||
} else {
|
||||
heap.Pop(&pq)
|
||||
}
|
||||
}
|
||||
|
||||
for partition := range unassignedPartitions {
|
||||
sortedPartitions = append(sortedPartitions, partition)
|
||||
}
|
||||
} else {
|
||||
// an ascending sorted set of topic partitions based on how many consumers can potentially use them
|
||||
sortedPartitions = sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers)
|
||||
}
|
||||
return sortedPartitions
|
||||
}
|
||||
|
||||
func sortMemberIDsByPartitionAssignments(assignments map[string][]topicPartitionAssignment) []string {
|
||||
// sort the members by the number of partition assignments in ascending order
|
||||
sortedMemberIDs := make([]string, 0, len(assignments))
|
||||
for memberID := range assignments {
|
||||
sortedMemberIDs = append(sortedMemberIDs, memberID)
|
||||
}
|
||||
sort.SliceStable(sortedMemberIDs, func(i, j int) bool {
|
||||
ret := len(assignments[sortedMemberIDs[i]]) - len(assignments[sortedMemberIDs[j]])
|
||||
if ret == 0 {
|
||||
return sortedMemberIDs[i] < sortedMemberIDs[j]
|
||||
}
|
||||
return len(assignments[sortedMemberIDs[i]]) < len(assignments[sortedMemberIDs[j]])
|
||||
})
|
||||
return sortedMemberIDs
|
||||
}
|
||||
|
||||
func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers map[topicPartitionAssignment][]string) []topicPartitionAssignment {
|
||||
// sort the partitions by the number of potential consumers in ascending order
|
||||
sortedPartionIDs := make([]topicPartitionAssignment, len(partition2AllPotentialConsumers))
|
||||
i := 0
|
||||
for partition := range partition2AllPotentialConsumers {
|
||||
sortedPartionIDs[i] = partition
|
||||
i++
|
||||
}
|
||||
sort.Slice(sortedPartionIDs, func(i, j int) bool {
|
||||
if len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) == len(partition2AllPotentialConsumers[sortedPartionIDs[j]]) {
|
||||
ret := strings.Compare(sortedPartionIDs[i].Topic, sortedPartionIDs[j].Topic)
|
||||
if ret == 0 {
|
||||
return sortedPartionIDs[i].Partition < sortedPartionIDs[j].Partition
|
||||
}
|
||||
return ret < 0
|
||||
}
|
||||
return len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) < len(partition2AllPotentialConsumers[sortedPartionIDs[j]])
|
||||
})
|
||||
return sortedPartionIDs
|
||||
}
|
||||
|
||||
func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment {
|
||||
m := make(map[string][]topicPartitionAssignment, len(assignment))
|
||||
for memberID, subscriptions := range assignment {
|
||||
m[memberID] = append(subscriptions[:0:0], subscriptions...)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func areSubscriptionsIdentical(partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) bool {
|
||||
curMembers := make(map[string]int)
|
||||
for _, cur := range partition2AllPotentialConsumers {
|
||||
if len(curMembers) == 0 {
|
||||
for _, curMembersElem := range cur {
|
||||
curMembers[curMembersElem]++
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if len(curMembers) != len(cur) {
|
||||
return false
|
||||
}
|
||||
|
||||
yMap := make(map[string]int)
|
||||
for _, yElem := range cur {
|
||||
yMap[yElem]++
|
||||
}
|
||||
|
||||
for curMembersMapKey, curMembersMapVal := range curMembers {
|
||||
if yMap[curMembersMapKey] != curMembersMapVal {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
curPartitions := make(map[topicPartitionAssignment]int)
|
||||
for _, cur := range consumer2AllPotentialPartitions {
|
||||
if len(curPartitions) == 0 {
|
||||
for _, curPartitionElem := range cur {
|
||||
curPartitions[curPartitionElem]++
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if len(curPartitions) != len(cur) {
|
||||
return false
|
||||
}
|
||||
|
||||
yMap := make(map[topicPartitionAssignment]int)
|
||||
for _, yElem := range cur {
|
||||
yMap[yElem]++
|
||||
}
|
||||
|
||||
for curMembersMapKey, curMembersMapVal := range curPartitions {
|
||||
if yMap[curMembersMapKey] != curMembersMapVal {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// We need to process subscriptions' user data with each consumer's reported generation in mind
|
||||
// higher generations overwrite lower generations in case of a conflict
|
||||
// note that a conflict could exist only if user data is for different generations
|
||||
func prepopulateCurrentAssignments(members map[string]ConsumerGroupMemberMetadata) (map[string][]topicPartitionAssignment, map[topicPartitionAssignment]consumerGenerationPair, error) {
|
||||
currentAssignment := make(map[string][]topicPartitionAssignment)
|
||||
prevAssignment := make(map[topicPartitionAssignment]consumerGenerationPair)
|
||||
|
||||
// for each partition we create a sorted map of its consumers by generation
|
||||
sortedPartitionConsumersByGeneration := make(map[topicPartitionAssignment]map[int]string)
|
||||
for memberID, meta := range members {
|
||||
consumerUserData, err := deserializeTopicPartitionAssignment(meta.UserData)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
for _, partition := range consumerUserData.partitions() {
|
||||
if consumers, exists := sortedPartitionConsumersByGeneration[partition]; exists {
|
||||
if consumerUserData.hasGeneration() {
|
||||
if _, generationExists := consumers[consumerUserData.generation()]; generationExists {
|
||||
// same partition is assigned to two consumers during the same rebalance.
|
||||
// log a warning and skip this record
|
||||
Logger.Printf("Topic %s Partition %d is assigned to multiple consumers following sticky assignment generation %d", partition.Topic, partition.Partition, consumerUserData.generation())
|
||||
continue
|
||||
} else {
|
||||
consumers[consumerUserData.generation()] = memberID
|
||||
}
|
||||
} else {
|
||||
consumers[defaultGeneration] = memberID
|
||||
}
|
||||
} else {
|
||||
generation := defaultGeneration
|
||||
if consumerUserData.hasGeneration() {
|
||||
generation = consumerUserData.generation()
|
||||
}
|
||||
sortedPartitionConsumersByGeneration[partition] = map[int]string{generation: memberID}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// prevAssignment holds the prior ConsumerGenerationPair (before current) of each partition
|
||||
// current and previous consumers are the last two consumers of each partition in the above sorted map
|
||||
for partition, consumers := range sortedPartitionConsumersByGeneration {
|
||||
// sort consumers by generation in decreasing order
|
||||
var generations []int
|
||||
for generation := range consumers {
|
||||
generations = append(generations, generation)
|
||||
}
|
||||
sort.Sort(sort.Reverse(sort.IntSlice(generations)))
|
||||
|
||||
consumer := consumers[generations[0]]
|
||||
if _, exists := currentAssignment[consumer]; !exists {
|
||||
currentAssignment[consumer] = []topicPartitionAssignment{partition}
|
||||
} else {
|
||||
currentAssignment[consumer] = append(currentAssignment[consumer], partition)
|
||||
}
|
||||
|
||||
// check for previous assignment, if any
|
||||
if len(generations) > 1 {
|
||||
prevAssignment[partition] = consumerGenerationPair{
|
||||
MemberID: consumers[generations[1]],
|
||||
Generation: generations[1],
|
||||
}
|
||||
}
|
||||
}
|
||||
return currentAssignment, prevAssignment, nil
|
||||
}
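As an illustrative example of the generation handling above: if partition T/0 is reported in the user data of member A with generation 2 and of member B with generation 1, A becomes the partition's entry in currentAssignment, and the pair (B, generation 1) is recorded in prevAssignment for that partition.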
|
||||
|
||||
type consumerGenerationPair struct {
|
||||
MemberID string
|
||||
Generation int
|
||||
}
|
||||
|
||||
// consumerPair represents a pair of Kafka consumer ids involved in a partition reassignment.
|
||||
type consumerPair struct {
|
||||
SrcMemberID string
|
||||
DstMemberID string
|
||||
}
|
||||
|
||||
// partitionMovements maintains some data structures to simplify lookup of partition movements among consumers.
|
||||
type partitionMovements struct {
|
||||
PartitionMovementsByTopic map[string]map[consumerPair]map[topicPartitionAssignment]bool
|
||||
Movements map[topicPartitionAssignment]consumerPair
|
||||
}
|
||||
|
||||
func (p *partitionMovements) removeMovementRecordOfPartition(partition topicPartitionAssignment) consumerPair {
|
||||
pair := p.Movements[partition]
|
||||
delete(p.Movements, partition)
|
||||
|
||||
partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
|
||||
delete(partitionMovementsForThisTopic[pair], partition)
|
||||
if len(partitionMovementsForThisTopic[pair]) == 0 {
|
||||
delete(partitionMovementsForThisTopic, pair)
|
||||
}
|
||||
if len(p.PartitionMovementsByTopic[partition.Topic]) == 0 {
|
||||
delete(p.PartitionMovementsByTopic, partition.Topic)
|
||||
}
|
||||
return pair
|
||||
}
|
||||
|
||||
func (p *partitionMovements) addPartitionMovementRecord(partition topicPartitionAssignment, pair consumerPair) {
|
||||
p.Movements[partition] = pair
|
||||
if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
|
||||
p.PartitionMovementsByTopic[partition.Topic] = make(map[consumerPair]map[topicPartitionAssignment]bool)
|
||||
}
|
||||
partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
|
||||
if _, exists := partitionMovementsForThisTopic[pair]; !exists {
|
||||
partitionMovementsForThisTopic[pair] = make(map[topicPartitionAssignment]bool)
|
||||
}
|
||||
partitionMovementsForThisTopic[pair][partition] = true
|
||||
}
|
||||
|
||||
func (p *partitionMovements) movePartition(partition topicPartitionAssignment, oldConsumer, newConsumer string) {
|
||||
pair := consumerPair{
|
||||
SrcMemberID: oldConsumer,
|
||||
DstMemberID: newConsumer,
|
||||
}
|
||||
if _, exists := p.Movements[partition]; exists {
|
||||
// this partition has previously moved
|
||||
existingPair := p.removeMovementRecordOfPartition(partition)
|
||||
if existingPair.DstMemberID != oldConsumer {
|
||||
Logger.Printf("Existing pair DstMemberID %s was not equal to the oldConsumer ID %s", existingPair.DstMemberID, oldConsumer)
|
||||
}
|
||||
if existingPair.SrcMemberID != newConsumer {
|
||||
// the partition is not moving back to its previous consumer
|
||||
p.addPartitionMovementRecord(partition, consumerPair{
|
||||
SrcMemberID: existingPair.SrcMemberID,
|
||||
DstMemberID: newConsumer,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
p.addPartitionMovementRecord(partition, pair)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicPartitionAssignment, oldConsumer, newConsumer string) topicPartitionAssignment {
|
||||
if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
|
||||
return partition
|
||||
}
|
||||
if _, exists := p.Movements[partition]; exists {
|
||||
// this partition has previously moved
|
||||
if oldConsumer != p.Movements[partition].DstMemberID {
|
||||
Logger.Printf("Partition movement DstMemberID %s was not equal to the oldConsumer ID %s", p.Movements[partition].DstMemberID, oldConsumer)
|
||||
}
|
||||
oldConsumer = p.Movements[partition].SrcMemberID
|
||||
}
|
||||
|
||||
partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
|
||||
reversePair := consumerPair{
|
||||
SrcMemberID: newConsumer,
|
||||
DstMemberID: oldConsumer,
|
||||
}
|
||||
if _, exists := partitionMovementsForThisTopic[reversePair]; !exists {
|
||||
return partition
|
||||
}
|
||||
var reversePairPartition topicPartitionAssignment
|
||||
for otherPartition := range partitionMovementsForThisTopic[reversePair] {
|
||||
reversePairPartition = otherPartition
|
||||
}
|
||||
return reversePairPartition
|
||||
}
|
||||
|
||||
func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) {
|
||||
if src == dst {
|
||||
return currentPath, false
|
||||
}
|
||||
if len(pairs) == 0 {
|
||||
return currentPath, false
|
||||
}
|
||||
for _, pair := range pairs {
|
||||
if src == pair.SrcMemberID && dst == pair.DstMemberID {
|
||||
currentPath = append(currentPath, src, dst)
|
||||
return currentPath, true
|
||||
}
|
||||
}
|
||||
|
||||
for _, pair := range pairs {
|
||||
if pair.SrcMemberID == src {
|
||||
// create a deep copy of the pairs, excluding the current pair
|
||||
reducedSet := make([]consumerPair, len(pairs)-1)
|
||||
i := 0
|
||||
for _, p := range pairs {
|
||||
if p != pair {
|
||||
reducedSet[i] = p
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
currentPath = append(currentPath, pair.SrcMemberID)
|
||||
return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath)
|
||||
}
|
||||
}
|
||||
return currentPath, false
|
||||
}
|
||||
|
||||
func (p *partitionMovements) in(cycle []string, cycles [][]string) bool {
|
||||
superCycle := make([]string, len(cycle)-1)
|
||||
for i := 0; i < len(cycle)-1; i++ {
|
||||
superCycle[i] = cycle[i]
|
||||
}
|
||||
superCycle = append(superCycle, cycle...)
|
||||
for _, foundCycle := range cycles {
|
||||
if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *partitionMovements) hasCycles(pairs []consumerPair) bool {
|
||||
cycles := make([][]string, 0)
|
||||
for _, pair := range pairs {
|
||||
// create a deep copy of the pairs, excluding the current pair
|
||||
reducedPairs := make([]consumerPair, len(pairs)-1)
|
||||
i := 0
|
||||
for _, p := range pairs {
|
||||
if p != pair {
|
||||
reducedPairs[i] = p
|
||||
i++
|
||||
}
|
||||
}
|
||||
if path, linked := p.isLinked(pair.DstMemberID, pair.SrcMemberID, reducedPairs, []string{pair.SrcMemberID}); linked {
|
||||
if !p.in(path, cycles) {
|
||||
cycles = append(cycles, path)
|
||||
Logger.Printf("A cycle of length %d was found: %v", len(path)-1, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// for now we want to make sure there are no partition movements of the same topic between a pair of consumers.
|
||||
// the odds of finding a cycle among more than two consumers seem to be very low (according to various randomized
|
||||
// tests with the given sticky algorithm) that it is not worth the added complexity of handling those cases.
|
||||
for _, cycle := range cycles {
|
||||
if len(cycle) == 3 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *partitionMovements) isSticky() bool {
|
||||
for topic, movements := range p.PartitionMovementsByTopic {
|
||||
movementPairs := make([]consumerPair, len(movements))
|
||||
i := 0
|
||||
for pair := range movements {
|
||||
movementPairs[i] = pair
|
||||
i++
|
||||
}
|
||||
if p.hasCycles(movementPairs) {
|
||||
Logger.Printf("Stickiness is violated for topic %s", topic)
|
||||
Logger.Printf("Partition movements for this topic occurred among the following consumer pairs: %v", movements)
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func indexOfSubList(source []string, target []string) int {
|
||||
targetSize := len(target)
|
||||
maxCandidate := len(source) - targetSize
|
||||
nextCand:
|
||||
for candidate := 0; candidate <= maxCandidate; candidate++ {
|
||||
j := candidate
|
||||
for i := 0; i < targetSize; i++ {
|
||||
if target[i] != source[j] {
|
||||
// Element mismatch, try next candidate
|
||||
continue nextCand
|
||||
}
|
||||
j++
|
||||
}
|
||||
// All elements of candidate matched target
|
||||
return candidate
|
||||
}
|
||||
return -1
|
||||
}
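For illustration: indexOfSubList([]string{"a", "b", "c", "b"}, []string{"b", "c"}) returns 1, the first index at which the target appears as a contiguous sub-list of the source, and -1 when the target never occurs.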
|
||||
|
||||
type consumerGroupMember struct {
|
||||
id string
|
||||
assignments []topicPartitionAssignment
|
||||
}
|
||||
|
||||
// assignmentPriorityQueue is a priority-queue of consumer group members that is sorted
|
||||
// in descending order (most assignments to least assignments).
|
||||
type assignmentPriorityQueue []*consumerGroupMember
|
||||
|
||||
func (pq assignmentPriorityQueue) Len() int { return len(pq) }
|
||||
|
||||
func (pq assignmentPriorityQueue) Less(i, j int) bool {
|
||||
// order the assignment priority queue in descending order by assignment count, breaking ties by member id
|
||||
if len(pq[i].assignments) == len(pq[j].assignments) {
|
||||
return strings.Compare(pq[i].id, pq[j].id) > 0
|
||||
}
|
||||
return len(pq[i].assignments) > len(pq[j].assignments)
|
||||
}
|
||||
|
||||
func (pq assignmentPriorityQueue) Swap(i, j int) {
|
||||
pq[i], pq[j] = pq[j], pq[i]
|
||||
}
|
||||
|
||||
func (pq *assignmentPriorityQueue) Push(x interface{}) {
|
||||
member := x.(*consumerGroupMember)
|
||||
*pq = append(*pq, member)
|
||||
}
|
||||
|
||||
func (pq *assignmentPriorityQueue) Pop() interface{} {
|
||||
old := *pq
|
||||
n := len(old)
|
||||
member := old[n-1]
|
||||
*pq = old[0 : n-1]
|
||||
return member
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -17,9 +17,15 @@ type Client interface {
|
|||
// altered after it has been created.
|
||||
Config() *Config
|
||||
|
||||
// Controller returns the cluster controller broker. Requires Kafka 0.10 or higher.
|
||||
// Controller returns the cluster controller broker. It will return a
|
||||
// locally cached value if it's available. You can call RefreshController
|
||||
// to update the cached value. Requires Kafka 0.10 or higher.
|
||||
Controller() (*Broker, error)
|
||||
|
||||
// RefreshController retrieves the cluster controller from fresh metadata
|
||||
// and stores it in the local cache. Requires Kafka 0.10 or higher.
|
||||
RefreshController() (*Broker, error)
|
||||
|
||||
// Brokers returns the current set of active brokers as retrieved from cluster metadata.
|
||||
Brokers() []*Broker
|
||||
|
||||
|
@ -46,6 +52,15 @@ type Client interface {
|
|||
// the partition leader.
|
||||
InSyncReplicas(topic string, partitionID int32) ([]int32, error)
|
||||
|
||||
// OfflineReplicas returns the set of all offline replica IDs for the given
|
||||
// partition. Offline replicas are replicas hosted on brokers that are currently offline (unavailable).
|
||||
OfflineReplicas(topic string, partitionID int32) ([]int32, error)
|
||||
|
||||
// RefreshBrokers takes a list of addresses to be used as seed brokers.
|
||||
// Existing broker connections are closed and the updated list of seed brokers
|
||||
// will be used for the next metadata fetch.
|
||||
RefreshBrokers(addrs []string) error
|
||||
|
||||
// RefreshMetadata takes a list of topics and queries the cluster to refresh the
|
||||
// available metadata for those topics. If no topics are provided, it will refresh
|
||||
// metadata for all topics.
|
||||
|
@ -67,6 +82,9 @@ type Client interface {
|
|||
// in local cache. This function only works on Kafka 0.8.2 and higher.
|
||||
RefreshCoordinator(consumerGroup string) error
|
||||
|
||||
// InitProducerID retrieves information required for Idempotent Producer
|
||||
InitProducerID() (*InitProducerIDResponse, error)
|
||||
|
||||
// Close shuts down all broker connections managed by this client. It is required
|
||||
// to call this function before a client object passes out of scope, as it will
|
||||
// otherwise leak memory. You must close any Producers or Consumers using a client
|
||||
|
@ -75,6 +93,9 @@ type Client interface {
|
|||
|
||||
// Closed returns true if the client has already had Close called on it
|
||||
Closed() bool
|
||||
|
||||
// A private method to prevent users implementing the interface for compatibility
|
||||
private()
|
||||
}
|
||||
|
||||
const (
|
||||
|
@ -142,10 +163,7 @@ func NewClient(addrs []string, conf *Config) (Client, error) {
|
|||
coordinators: make(map[string]int32),
|
||||
}
|
||||
|
||||
random := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
for _, index := range random.Perm(len(addrs)) {
|
||||
client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
|
||||
}
|
||||
client.randomizeSeedBrokers(addrs)
|
||||
|
||||
if conf.Metadata.Full {
|
||||
// do an initial fetch of all cluster metadata by specifying an empty list of topics
|
||||
|
@ -169,6 +187,8 @@ func NewClient(addrs []string, conf *Config) (Client, error) {
|
|||
return client, nil
|
||||
}
|
||||
|
||||
func (client *client) private() {}
|
||||
|
||||
func (client *client) Config() *Config {
|
||||
return client.conf
|
||||
}
|
||||
|
@ -176,13 +196,32 @@ func (client *client) Config() *Config {
|
|||
func (client *client) Brokers() []*Broker {
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
brokers := make([]*Broker, 0)
|
||||
brokers := make([]*Broker, 0, len(client.brokers))
|
||||
for _, broker := range client.brokers {
|
||||
brokers = append(brokers, broker)
|
||||
}
|
||||
return brokers
|
||||
}
|
||||
|
||||
func (client *client) InitProducerID() (*InitProducerIDResponse, error) {
|
||||
var err error
|
||||
for broker := client.any(); broker != nil; broker = client.any() {
|
||||
req := &InitProducerIDRequest{}
|
||||
|
||||
response, err := broker.InitProducerID(req)
|
||||
switch err.(type) {
|
||||
case nil:
|
||||
return response, nil
|
||||
default:
|
||||
// some error, remove that broker and try again
|
||||
Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err)
|
||||
_ = broker.Close()
|
||||
client.deregisterBroker(broker)
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (client *client) Close() error {
|
||||
if client.Closed() {
|
||||
// Chances are this is being called from a defer() and the error will go unobserved
|
||||
|
@ -215,6 +254,9 @@ func (client *client) Close() error {
|
|||
}
|
||||
|
||||
func (client *client) Closed() bool {
|
||||
client.lock.RLock()
|
||||
defer client.lock.RUnlock()
|
||||
|
||||
return client.brokers == nil
|
||||
}
|
||||
|
||||
|
@ -265,7 +307,8 @@ func (client *client) Partitions(topic string) ([]int32, error) {
|
|||
partitions = client.cachedPartitions(topic, allPartitions)
|
||||
}
|
||||
|
||||
if partitions == nil {
|
||||
// no partitions found after refresh metadata
|
||||
if len(partitions) == 0 {
|
||||
return nil, ErrUnknownTopicOrPartition
|
||||
}
|
||||
|
||||
|
@ -350,6 +393,31 @@ func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32,
|
|||
return dupInt32Slice(metadata.Isr), nil
|
||||
}
|
||||
|
||||
func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
metadata := client.cachedMetadata(topic, partitionID)
|
||||
|
||||
if metadata == nil {
|
||||
err := client.RefreshMetadata(topic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
metadata = client.cachedMetadata(topic, partitionID)
|
||||
}
|
||||
|
||||
if metadata == nil {
|
||||
return nil, ErrUnknownTopicOrPartition
|
||||
}
|
||||
|
||||
if metadata.Err == ErrReplicaNotAvailable {
|
||||
return dupInt32Slice(metadata.OfflineReplicas), metadata.Err
|
||||
}
|
||||
return dupInt32Slice(metadata.OfflineReplicas), nil
|
||||
}
|
||||
|
||||
func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
|
@ -368,6 +436,27 @@ func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
|
|||
return leader, err
|
||||
}
|
||||
|
||||
func (client *client) RefreshBrokers(addrs []string) error {
|
||||
if client.Closed() {
|
||||
return ErrClosedClient
|
||||
}
|
||||
|
||||
client.lock.Lock()
|
||||
defer client.lock.Unlock()
|
||||
|
||||
for _, broker := range client.brokers {
|
||||
_ = broker.Close()
|
||||
delete(client.brokers, broker.ID())
|
||||
}
|
||||
|
||||
client.seedBrokers = nil
|
||||
client.deadSeeds = nil
|
||||
|
||||
client.randomizeSeedBrokers(addrs)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *client) RefreshMetadata(topics ...string) error {
|
||||
if client.Closed() {
|
||||
return ErrClosedClient
|
||||
|
@ -382,7 +471,11 @@ func (client *client) RefreshMetadata(topics ...string) error {
|
|||
}
|
||||
}
|
||||
|
||||
return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
|
||||
deadline := time.Time{}
|
||||
if client.conf.Metadata.Timeout > 0 {
|
||||
deadline = time.Now().Add(client.conf.Metadata.Timeout)
|
||||
}
|
||||
return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline)
|
||||
}
|
||||
|
||||
func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
|
||||
|
@ -427,6 +520,35 @@ func (client *client) Controller() (*Broker, error) {
|
|||
return controller, nil
|
||||
}
|
||||
|
||||
// deregisterController removes the cached controllerID
|
||||
func (client *client) deregisterController() {
|
||||
client.lock.Lock()
|
||||
defer client.lock.Unlock()
|
||||
delete(client.brokers, client.controllerID)
|
||||
}
|
||||
|
||||
// RefreshController retrieves the cluster controller from fresh metadata
|
||||
// and stores it in the local cache. Requires Kafka 0.10 or higher.
|
||||
func (client *client) RefreshController() (*Broker, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
client.deregisterController()
|
||||
|
||||
if err := client.refreshMetadata(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
controller := client.cachedController()
|
||||
if controller == nil {
|
||||
return nil, ErrControllerNotAvailable
|
||||
}
|
||||
|
||||
_ = controller.Open(client.conf)
|
||||
return controller, nil
|
||||
}
|
||||
|
||||
func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
|
@ -468,10 +590,46 @@ func (client *client) RefreshCoordinator(consumerGroup string) error {
|
|||
|
||||
// private broker management helpers
|
||||
|
||||
func (client *client) randomizeSeedBrokers(addrs []string) {
|
||||
random := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
for _, index := range random.Perm(len(addrs)) {
|
||||
client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
|
||||
}
|
||||
}
|
||||
|
||||
func (client *client) updateBroker(brokers []*Broker) {
|
||||
var currentBroker = make(map[int32]*Broker, len(brokers))
|
||||
|
||||
for _, broker := range brokers {
|
||||
currentBroker[broker.ID()] = broker
|
||||
if client.brokers[broker.ID()] == nil { // add new broker
|
||||
client.brokers[broker.ID()] = broker
|
||||
Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
|
||||
} else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address
|
||||
safeAsyncClose(client.brokers[broker.ID()])
|
||||
client.brokers[broker.ID()] = broker
|
||||
Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
|
||||
}
|
||||
}
|
||||
|
||||
for id, broker := range client.brokers {
|
||||
if _, exist := currentBroker[id]; !exist { // remove old broker
|
||||
safeAsyncClose(broker)
|
||||
delete(client.brokers, id)
|
||||
Logger.Printf("client/broker remove invalid broker #%d with %s", broker.ID(), broker.Addr())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
|
||||
// in the brokers map. It returns the broker that is registered, which may be the provided broker,
|
||||
// or a previously registered Broker instance. You must hold the write lock before calling this function.
|
||||
func (client *client) registerBroker(broker *Broker) {
|
||||
if client.brokers == nil {
|
||||
Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr())
|
||||
return
|
||||
}
|
||||
|
||||
if client.brokers[broker.ID()] == nil {
|
||||
client.brokers[broker.ID()] = broker
|
||||
Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
|
||||
|
@ -665,7 +823,7 @@ func (client *client) backgroundMetadataUpdater() {
|
|||
}
|
||||
|
||||
func (client *client) refreshMetadata() error {
|
||||
topics := []string{}
|
||||
var topics []string
|
||||
|
||||
if !client.conf.Metadata.Full {
|
||||
if specificTopics, err := client.MetadataTopics(); err != nil {
|
||||
|
@ -684,29 +842,47 @@ func (client *client) refreshMetadata() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
|
||||
func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error {
|
||||
pastDeadline := func(backoff time.Duration) bool {
|
||||
if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) {
|
||||
// we are past the deadline
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
retry := func(err error) error {
|
||||
if attemptsRemaining > 0 {
|
||||
Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
|
||||
time.Sleep(client.conf.Metadata.Retry.Backoff)
|
||||
return client.tryRefreshMetadata(topics, attemptsRemaining-1)
|
||||
backoff := client.computeBackoff(attemptsRemaining)
|
||||
if pastDeadline(backoff) {
|
||||
Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout")
|
||||
return err
|
||||
}
|
||||
Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
|
||||
if backoff > 0 {
|
||||
time.Sleep(backoff)
|
||||
}
|
||||
return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for broker := client.any(); broker != nil; broker = client.any() {
|
||||
broker := client.any()
|
||||
for ; broker != nil && !pastDeadline(0); broker = client.any() {
|
||||
allowAutoTopicCreation := true
|
||||
if len(topics) > 0 {
|
||||
Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
|
||||
} else {
|
||||
allowAutoTopicCreation = false
|
||||
Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
|
||||
}
|
||||
|
||||
req := &MetadataRequest{Topics: topics}
|
||||
if client.conf.Version.IsAtLeast(V0_10_0_0) {
|
||||
req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation}
|
||||
if client.conf.Version.IsAtLeast(V1_0_0_0) {
|
||||
req.Version = 5
|
||||
} else if client.conf.Version.IsAtLeast(V0_10_0_0) {
|
||||
req.Version = 1
|
||||
}
|
||||
response, err := broker.GetMetadata(req)
|
||||
|
||||
switch err.(type) {
|
||||
case nil:
|
||||
allKnownMetaData := len(topics) == 0
|
||||
|
@ -721,14 +897,36 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int)
|
|||
case PacketEncodingError:
|
||||
// didn't even send, return the error
|
||||
return err
|
||||
|
||||
case KError:
|
||||
// if SASL auth error, return as this _should_ be a non-retryable err for all brokers
|
||||
if err.(KError) == ErrSASLAuthenticationFailed {
|
||||
Logger.Println("client/metadata failed SASL authentication")
|
||||
return err
|
||||
}
|
||||
|
||||
if err.(KError) == ErrTopicAuthorizationFailed {
|
||||
Logger.Println("client is not authorized to access this topic. The topics were: ", topics)
|
||||
return err
|
||||
}
|
||||
// else remove that broker and try again
|
||||
Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
|
||||
_ = broker.Close()
|
||||
client.deregisterBroker(broker)
|
||||
|
||||
default:
|
||||
// some other error, remove that broker and try again
|
||||
Logger.Println("client/metadata got error from broker while fetching metadata:", err)
|
||||
Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
|
||||
_ = broker.Close()
|
||||
client.deregisterBroker(broker)
|
||||
}
|
||||
}
|
||||
|
||||
if broker != nil {
|
||||
Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
|
||||
return retry(ErrOutOfBrokers)
|
||||
}
|
||||
|
||||
Logger.Println("client/metadata no available broker to send metadata request to")
|
||||
client.resurrectDeadBrokers()
|
||||
return retry(ErrOutOfBrokers)
|
||||
|
@ -736,16 +934,19 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int)
|
|||
|
||||
// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
|
||||
func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
|
||||
if client.Closed() {
|
||||
return
|
||||
}
|
||||
|
||||
client.lock.Lock()
|
||||
defer client.lock.Unlock()
|
||||
|
||||
// For all the brokers we received:
|
||||
// - if it is a new ID, save it
|
||||
// - if it is an existing ID, but the address we have is stale, discard the old one and save it
|
||||
// - if a broker is no longer present in the response, remove the old broker
|
||||
// - otherwise ignore it, replacing our existing one would just bounce the connection
|
||||
for _, broker := range data.Brokers {
|
||||
client.registerBroker(broker)
|
||||
}
|
||||
client.updateBroker(data.Brokers)
|
||||
|
||||
client.controllerID = data.ControllerID
|
||||
|
||||
|
@ -766,7 +967,7 @@ func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bo
|
|||
|
||||
switch topic.Err {
|
||||
case ErrNoError:
|
||||
break
|
||||
// no-op
|
||||
case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
|
||||
err = topic.Err
|
||||
continue
|
||||
|
@ -776,7 +977,6 @@ func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bo
|
|||
continue
|
||||
case ErrLeaderNotAvailable: // retry, but store partial partition results
|
||||
retry = true
|
||||
break
|
||||
default: // don't retry, don't store partial results
|
||||
Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
|
||||
err = topic.Err
|
||||
|
@ -816,11 +1016,21 @@ func (client *client) cachedController() *Broker {
|
|||
return client.brokers[client.controllerID]
|
||||
}
|
||||
|
||||
func (client *client) computeBackoff(attemptsRemaining int) time.Duration {
|
||||
if client.conf.Metadata.Retry.BackoffFunc != nil {
|
||||
maxRetries := client.conf.Metadata.Retry.Max
|
||||
retries := maxRetries - attemptsRemaining
|
||||
return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries)
|
||||
}
|
||||
return client.conf.Metadata.Retry.Backoff
|
||||
}
|
||||
|
||||
func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) {
|
||||
retry := func(err error) (*FindCoordinatorResponse, error) {
|
||||
if attemptsRemaining > 0 {
|
||||
Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
|
||||
time.Sleep(client.conf.Metadata.Retry.Backoff)
|
||||
backoff := client.computeBackoff(attemptsRemaining)
|
||||
Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
|
||||
time.Sleep(backoff)
|
||||
return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
|
||||
}
|
||||
return nil, err
|
||||
|
@ -865,6 +1075,10 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin
|
|||
}
|
||||
|
||||
return retry(ErrConsumerCoordinatorNotAvailable)
|
||||
case ErrGroupAuthorizationFailed:
|
||||
Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", consumerGroup)
|
||||
return retry(ErrGroupAuthorizationFailed)
|
||||
|
||||
default:
|
||||
return nil, response.Err
|
||||
}
|
||||
|
@ -874,3 +1088,18 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin
|
|||
client.resurrectDeadBrokers()
|
||||
return retry(ErrOutOfBrokers)
|
||||
}
|
||||
|
||||
// nopCloserClient embeds an existing Client, but disables
|
||||
// the Close method (yet all other methods pass
|
||||
// through unchanged). This is for use in larger structs
|
||||
// where it is undesirable to close the client that was
|
||||
// passed in by the caller.
|
||||
type nopCloserClient struct {
|
||||
Client
|
||||
}
|
||||
|
||||
// Close intercepts and purposely does not call the underlying
|
||||
// client's Close() method.
|
||||
func (ncc *nopCloserClient) Close() error {
|
||||
return nil
|
||||
}
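The same embed-and-override pattern works for any interface: embedding the interface gives pass-through implementations of every method, and defining a single method on the wrapper intercepts just that call. A small self-contained sketch (the types here are illustrative, not from Sarama):

	package main

	import "fmt"

	type Resource interface {
		Close() error
		Name() string
	}

	type file struct{ name string }

	func (f *file) Close() error { fmt.Println("closing", f.name); return nil }
	func (f *file) Name() string { return f.name }

	// nopCloser forwards every method to the embedded Resource except Close,
	// which is intercepted and does nothing.
	type nopCloser struct {
		Resource
	}

	func (nopCloser) Close() error { return nil }

	func main() {
		r := nopCloser{&file{name: "shared"}}
		fmt.Println(r.Name()) // delegated: "shared"
		_ = r.Close()         // intercepted: the real Close never runs
	}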
|
||||
|
|
|
@ -0,0 +1,194 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
snappy "github.com/eapache/go-xerial-snappy"
|
||||
"github.com/pierrec/lz4"
|
||||
)
|
||||
|
||||
var (
|
||||
lz4WriterPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return lz4.NewWriter(nil)
|
||||
},
|
||||
}
|
||||
|
||||
gzipWriterPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return gzip.NewWriter(nil)
|
||||
},
|
||||
}
|
||||
gzipWriterPoolForCompressionLevel1 = sync.Pool{
|
||||
New: func() interface{} {
|
||||
gz, err := gzip.NewWriterLevel(nil, 1)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return gz
|
||||
},
|
||||
}
|
||||
gzipWriterPoolForCompressionLevel2 = sync.Pool{
|
||||
New: func() interface{} {
|
||||
gz, err := gzip.NewWriterLevel(nil, 2)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return gz
|
||||
},
|
||||
}
|
||||
gzipWriterPoolForCompressionLevel3 = sync.Pool{
|
||||
New: func() interface{} {
|
||||
gz, err := gzip.NewWriterLevel(nil, 3)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return gz
|
||||
},
|
||||
}
|
||||
gzipWriterPoolForCompressionLevel4 = sync.Pool{
|
||||
New: func() interface{} {
|
||||
gz, err := gzip.NewWriterLevel(nil, 4)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return gz
|
||||
},
|
||||
}
|
||||
gzipWriterPoolForCompressionLevel5 = sync.Pool{
|
||||
New: func() interface{} {
|
||||
gz, err := gzip.NewWriterLevel(nil, 5)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return gz
|
||||
},
|
||||
}
|
||||
gzipWriterPoolForCompressionLevel6 = sync.Pool{
|
||||
New: func() interface{} {
|
||||
gz, err := gzip.NewWriterLevel(nil, 6)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return gz
|
||||
},
|
||||
}
|
||||
gzipWriterPoolForCompressionLevel7 = sync.Pool{
|
||||
New: func() interface{} {
|
||||
gz, err := gzip.NewWriterLevel(nil, 7)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return gz
|
||||
},
|
||||
}
|
||||
gzipWriterPoolForCompressionLevel8 = sync.Pool{
|
||||
New: func() interface{} {
|
||||
gz, err := gzip.NewWriterLevel(nil, 8)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return gz
|
||||
},
|
||||
}
|
||||
gzipWriterPoolForCompressionLevel9 = sync.Pool{
|
||||
New: func() interface{} {
|
||||
gz, err := gzip.NewWriterLevel(nil, 9)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return gz
|
||||
},
|
||||
}
|
||||
)
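The reason for one pool per gzip level is that gzip.Writer fixes its compression level at construction time; Reset reuses the writer with that same level, so pooled writers can only be shared by callers that want an identical level. A self-contained sketch of the same pattern for a single level:

	package main

	import (
		"bytes"
		"compress/gzip"
		"fmt"
		"sync"
	)

	// One pool per level: a pooled writer can only serve callers that want the
	// exact level it was constructed with, because Reset keeps that level.
	var level9Pool = sync.Pool{
		New: func() interface{} {
			w, err := gzip.NewWriterLevel(nil, 9)
			if err != nil {
				panic(err)
			}
			return w
		},
	}

	func gzipLevel9(data []byte) ([]byte, error) {
		w := level9Pool.Get().(*gzip.Writer)
		defer level9Pool.Put(w)

		var buf bytes.Buffer
		w.Reset(&buf) // reuse the writer, still at level 9
		if _, err := w.Write(data); err != nil {
			return nil, err
		}
		if err := w.Close(); err != nil {
			return nil, err
		}
		return buf.Bytes(), nil
	}

	func main() {
		out, _ := gzipLevel9(bytes.Repeat([]byte("kafka "), 100))
		fmt.Println("compressed to", len(out), "bytes")
	}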
|
||||
|
||||
func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) {
|
||||
switch cc {
|
||||
case CompressionNone:
|
||||
return data, nil
|
||||
case CompressionGZIP:
|
||||
var (
|
||||
err error
|
||||
buf bytes.Buffer
|
||||
writer *gzip.Writer
|
||||
)
|
||||
|
||||
switch level {
|
||||
case CompressionLevelDefault:
|
||||
writer = gzipWriterPool.Get().(*gzip.Writer)
|
||||
defer gzipWriterPool.Put(writer)
|
||||
writer.Reset(&buf)
|
||||
case 1:
|
||||
writer = gzipWriterPoolForCompressionLevel1.Get().(*gzip.Writer)
|
||||
defer gzipWriterPoolForCompressionLevel1.Put(writer)
|
||||
writer.Reset(&buf)
|
||||
case 2:
|
||||
writer = gzipWriterPoolForCompressionLevel2.Get().(*gzip.Writer)
|
||||
defer gzipWriterPoolForCompressionLevel2.Put(writer)
|
||||
writer.Reset(&buf)
|
||||
case 3:
|
||||
writer = gzipWriterPoolForCompressionLevel3.Get().(*gzip.Writer)
|
||||
defer gzipWriterPoolForCompressionLevel3.Put(writer)
|
||||
writer.Reset(&buf)
|
||||
case 4:
|
||||
writer = gzipWriterPoolForCompressionLevel4.Get().(*gzip.Writer)
|
||||
defer gzipWriterPoolForCompressionLevel4.Put(writer)
|
||||
writer.Reset(&buf)
|
||||
case 5:
|
||||
writer = gzipWriterPoolForCompressionLevel5.Get().(*gzip.Writer)
|
||||
defer gzipWriterPoolForCompressionLevel5.Put(writer)
|
||||
writer.Reset(&buf)
|
||||
case 6:
|
||||
writer = gzipWriterPoolForCompressionLevel6.Get().(*gzip.Writer)
|
||||
defer gzipWriterPoolForCompressionLevel6.Put(writer)
|
||||
writer.Reset(&buf)
|
||||
case 7:
|
||||
writer = gzipWriterPoolForCompressionLevel7.Get().(*gzip.Writer)
|
||||
defer gzipWriterPoolForCompressionLevel7.Put(writer)
|
||||
writer.Reset(&buf)
|
||||
case 8:
|
||||
writer = gzipWriterPoolForCompressionLevel8.Get().(*gzip.Writer)
|
||||
defer gzipWriterPoolForCompressionLevel8.Put(writer)
|
||||
writer.Reset(&buf)
|
||||
case 9:
|
||||
writer = gzipWriterPoolForCompressionLevel9.Get().(*gzip.Writer)
|
||||
defer gzipWriterPoolForCompressionLevel9.Put(writer)
|
||||
writer.Reset(&buf)
|
||||
default:
|
||||
writer, err = gzip.NewWriterLevel(&buf, level)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if _, err := writer.Write(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
case CompressionSnappy:
|
||||
return snappy.Encode(data), nil
|
||||
case CompressionLZ4:
|
||||
writer := lz4WriterPool.Get().(*lz4.Writer)
|
||||
defer lz4WriterPool.Put(writer)
|
||||
|
||||
var buf bytes.Buffer
|
||||
writer.Reset(&buf)
|
||||
|
||||
if _, err := writer.Write(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
case CompressionZSTD:
|
||||
return zstdCompress(nil, data)
|
||||
default:
|
||||
return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)}
|
||||
}
|
||||
}
|
|
@ -10,6 +10,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
"golang.org/x/net/proxy"
|
||||
)
|
||||
|
||||
const defaultClientID = "sarama"
|
||||
|
@ -20,6 +21,13 @@ var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
|
|||
type Config struct {
|
||||
// Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
|
||||
Admin struct {
|
||||
Retry struct {
|
||||
// The total number of times to retry sending (retriable) admin requests (default 5).
|
||||
// Similar to the `retries` setting of the JVM AdminClientConfig.
|
||||
Max int
|
||||
// Backoff time between retries of a failed request (default 100ms)
|
||||
Backoff time.Duration
|
||||
}
|
||||
// The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
|
||||
// including topics, brokers, configurations and ACLs (defaults to 3 seconds).
|
||||
Timeout time.Duration
|
||||
|
@ -54,17 +62,43 @@ type Config struct {
|
|||
// Whether or not to use SASL authentication when connecting to the broker
|
||||
// (defaults to false).
|
||||
Enable bool
|
||||
// SASLMechanism is the name of the enabled SASL mechanism.
|
||||
// Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN).
|
||||
Mechanism SASLMechanism
|
||||
// Version is the SASL Protocol Version to use
|
||||
// Kafka > 1.x should use V1, except on Azure EventHub which use V0
|
||||
Version int16
|
||||
// Whether or not to send the Kafka SASL handshake first if enabled
|
||||
// (defaults to true). You should only set this to false if you're using
|
||||
// a non-Kafka SASL proxy.
|
||||
Handshake bool
|
||||
//username and password for SASL/PLAIN authentication
|
||||
User string
|
||||
// AuthIdentity is an (optional) authorization identity (authzid) to
|
||||
// use for SASL/PLAIN authentication (if different from User) when
|
||||
// an authenticated user is permitted to act as the presented
|
||||
// alternative user. See RFC4616 for details.
|
||||
AuthIdentity string
|
||||
// User is the authentication identity (authcid) to present for
|
||||
// SASL/PLAIN or SASL/SCRAM authentication
|
||||
User string
|
||||
// Password for SASL/PLAIN authentication
|
||||
Password string
|
||||
// authz id used for SASL/SCRAM authentication
|
||||
SCRAMAuthzID string
|
||||
// SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM
|
||||
// client used to perform the SCRAM exchange with the server.
|
||||
SCRAMClientGeneratorFunc func() SCRAMClient
|
||||
// TokenProvider is a user-defined callback for generating
|
||||
// access tokens for SASL/OAUTHBEARER auth. See the
|
||||
// AccessTokenProvider interface docs for proper implementation
|
||||
// guidelines.
|
||||
TokenProvider AccessTokenProvider
|
||||
|
||||
GSSAPI GSSAPIConfig
|
||||
}
|
||||
|
||||
// KeepAlive specifies the keep-alive period for an active network connection.
|
||||
// If zero, keep-alives are disabled. (default is 0: disabled).
|
||||
// KeepAlive specifies the keep-alive period for an active network connection (defaults to 0).
|
||||
// If zero or positive, keep-alives are enabled.
|
||||
// If negative, keep-alives are disabled.
|
||||
KeepAlive time.Duration
|
||||
|
||||
// LocalAddr is the local address to use when dialing an
|
||||
|
@ -72,6 +106,14 @@ type Config struct {
|
|||
// network being dialed.
|
||||
// If nil, a local address is automatically chosen.
|
||||
LocalAddr net.Addr
|
||||
|
||||
Proxy struct {
|
||||
// Whether or not to use proxy when connecting to the broker
|
||||
// (defaults to false).
|
||||
Enable bool
|
||||
// The proxy dialer to use when the proxy is enabled (defaults to nil).
|
||||
Dialer proxy.Dialer
|
||||
}
|
||||
}
|
||||
|
||||
// Metadata is the namespace for metadata management properties used by the
|
||||
|
@ -84,6 +126,10 @@ type Config struct {
|
|||
// How long to wait for leader election to occur before retrying
|
||||
// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
|
||||
Backoff time.Duration
|
||||
// Called to compute backoff time dynamically. Useful for implementing
|
||||
// more sophisticated backoff strategies. This takes precedence over
|
||||
// `Backoff` if set.
|
||||
BackoffFunc func(retries, maxRetries int) time.Duration
|
||||
}
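A minimal sketch of plugging in a dynamic backoff; the exponential curve is just an example policy, not a Sarama default, and the time and sarama imports are assumed:

	cfg := sarama.NewConfig()
	cfg.Metadata.Retry.Max = 5
	// 100ms, 200ms, 400ms, ... doubling on each retry instead of the fixed Backoff.
	cfg.Metadata.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
		return time.Duration(100<<uint(retries)) * time.Millisecond
	}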
|
||||
// How frequently to refresh the cluster metadata in the background.
|
||||
// Defaults to 10 minutes. Set to 0 to disable. Similar to
|
||||
|
@ -95,6 +141,13 @@ type Config struct {
|
|||
// and usually more convenient, but can take up a substantial amount of
|
||||
// memory if you have many topics and partitions. Defaults to true.
|
||||
Full bool
|
||||
|
||||
// How long to wait for a successful metadata response.
|
||||
// Disabled by default, which means a metadata request against an unreachable
|
||||
// cluster (all brokers are unreachable or unresponsive) can take up to
|
||||
// `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max`
|
||||
// to fail.
|
||||
Timeout time.Duration
|
||||
}
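To make that bound concrete: with the default Net.DialTimeout of 30s, a hypothetical cluster of 5 brokers, the default Metadata.Retry.Max of 3 and Metadata.Retry.Backoff of 250ms, a refresh against an unreachable cluster could block for roughly 30s × 5 × (3 + 1) + 250ms × 3, i.e. about 10 minutes, before failing. Setting Metadata.Timeout caps the whole attempt, e.g.:

	cfg := sarama.NewConfig()
	cfg.Metadata.Timeout = 10 * time.Second // give up on a metadata refresh after 10s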
|
||||
|
||||
// Producer is the namespace for configuration related to producing messages,
|
||||
|
@ -124,6 +177,9 @@ type Config struct {
|
|||
// (defaults to hashing the message key). Similar to the `partitioner.class`
|
||||
// setting for the JVM producer.
|
||||
Partitioner PartitionerConstructor
|
||||
// If enabled, the producer will ensure that exactly one copy of each message is
|
||||
// written.
|
||||
Idempotent bool
|
||||
|
||||
// Return specifies what channels will be populated. If they are set to true,
|
||||
// you must read from the respective channels to prevent deadlock. If,
|
||||
|
@ -168,7 +224,19 @@ type Config struct {
|
|||
// (default 100ms). Similar to the `retry.backoff.ms` setting of the
|
||||
// JVM producer.
|
||||
Backoff time.Duration
|
||||
// Called to compute backoff time dynamically. Useful for implementing
|
||||
// more sophisticated backoff strategies. This takes precedence over
|
||||
// `Backoff` if set.
|
||||
BackoffFunc func(retries, maxRetries int) time.Duration
|
||||
}
|
||||
|
||||
// Interceptors to be called when the producer dispatcher reads the
|
||||
// message for the first time. Interceptors allow you to intercept and
|
||||
// possibly mutate the message before it is published to the Kafka
|
||||
// cluster. *ProducerMessage modified by the first interceptor's
|
||||
// OnSend() is passed to the second interceptor OnSend(), and so on in
|
||||
// the interceptor chain.
|
||||
Interceptors []ProducerInterceptor
|
||||
}
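A minimal sketch of a producer interceptor; headerInterceptor and its header values are hypothetical, while the OnSend hook and the Producer.Interceptors field come from the API above:

	// headerInterceptor stamps every outgoing message with a static header
	// before it is handed to the partitioner and brokers.
	type headerInterceptor struct{}

	func (headerInterceptor) OnSend(msg *sarama.ProducerMessage) {
		msg.Headers = append(msg.Headers, sarama.RecordHeader{
			Key:   []byte("origin"),
			Value: []byte("my-service"),
		})
	}

	// wiring it up:
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_11_0_0 // record headers need Kafka 0.11+
	cfg.Producer.Interceptors = []sarama.ProducerInterceptor{headerInterceptor{}}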
|
||||
|
||||
// Consumer is the namespace for configuration related to consuming messages,
|
||||
|
@ -226,6 +294,10 @@ type Config struct {
|
|||
// How long to wait after a failing to read from a partition before
|
||||
// trying again (default 2s).
|
||||
Backoff time.Duration
|
||||
// Called to compute backoff time dynamically. Useful for implementing
|
||||
// more sophisticated backoff strategies. This takes precedence over
|
||||
// `Backoff` if set.
|
||||
BackoffFunc func(retries int) time.Duration
|
||||
}
|
||||
|
||||
// Fetch is the namespace for controlling how many bytes are retrieved by any
|
||||
|
@ -263,7 +335,7 @@ type Config struct {
|
|||
// than this, that partition will stop fetching more messages until it
|
||||
// can proceed again.
|
||||
// Note that, since the Messages channel is buffered, the actual grace time is
|
||||
// (MaxProcessingTime * ChanneBufferSize). Defaults to 100ms.
|
||||
// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
|
||||
// If a message is not written to the Messages channel between two ticks
|
||||
// of the expiryTicker then a timeout is detected.
|
||||
// Using a ticker instead of a timer to detect timeouts should typically
|
||||
|
@ -289,9 +361,21 @@ type Config struct {
|
|||
// offsets. This currently requires the manual use of an OffsetManager
|
||||
// but will eventually be automated.
|
||||
Offsets struct {
|
||||
// How frequently to commit updated offsets. Defaults to 1s.
|
||||
// Deprecated: CommitInterval exists for historical compatibility
|
||||
// and should not be used. Please use Consumer.Offsets.AutoCommit
|
||||
CommitInterval time.Duration
|
||||
|
||||
// AutoCommit specifies configuration for committing updated offsets automatically.
|
||||
AutoCommit struct {
|
||||
// Whether or not to auto-commit updated offsets back to the broker.
|
||||
// (default enabled).
|
||||
Enable bool
|
||||
|
||||
// How frequently to commit updated offsets. Ineffective unless
|
||||
// auto-commit is enabled (default 1s)
|
||||
Interval time.Duration
|
||||
}
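For example, to turn off the background commit loop and decide yourself when marked offsets are flushed, or simply to commit less often — a short sketch (time and sarama imports assumed):

	cfg := sarama.NewConfig()
	cfg.Consumer.Offsets.AutoCommit.Enable = false // no background commits
	// or keep it enabled but commit less aggressively:
	// cfg.Consumer.Offsets.AutoCommit.Interval = 5 * time.Second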
|
||||
|
||||
// The initial offset to use if no offset was previously committed.
|
||||
// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
|
||||
Initial int64
|
||||
|
@ -310,12 +394,29 @@ type Config struct {
|
|||
Max int
|
||||
}
|
||||
}
|
||||
|
||||
// IsolationLevel supports 2 modes:
|
||||
// - use `ReadUncommitted` (default) to consume and return all messages in message channel
|
||||
// - use `ReadCommitted` to hide messages that are part of an aborted transaction
|
||||
IsolationLevel IsolationLevel
|
||||
|
||||
// Interceptors to be called just before the record is sent to the
|
||||
// messages channel. Interceptors allow you to intercept and possibly
|
||||
// mutate the message before it is returned to the client.
|
||||
// *ConsumerMessage modified by the first interceptor's OnConsume() is
|
||||
// passed to the second interceptor OnConsume(), and so on in the
|
||||
// interceptor chain.
|
||||
Interceptors []ConsumerInterceptor
|
||||
}
|
||||
|
||||
// A user-provided string sent with every request to the brokers for logging,
|
||||
// debugging, and auditing purposes. Defaults to "sarama", but you should
|
||||
// probably set it to something specific to your application.
|
||||
ClientID string
|
||||
// A rack identifier for this client. This can be any string value which
|
||||
// indicates where this client is physically located.
|
||||
// It corresponds with the broker config 'broker.rack'
|
||||
RackID string
|
||||
// The number of events to buffer in internal and external channels. This
|
||||
// permits the producer and consumer to continue processing some messages
|
||||
// in the background while user code is working, greatly improving throughput.
|
||||
|
@ -340,6 +441,8 @@ type Config struct {
|
|||
func NewConfig() *Config {
|
||||
c := &Config{}
|
||||
|
||||
c.Admin.Retry.Max = 5
|
||||
c.Admin.Retry.Backoff = 100 * time.Millisecond
|
||||
c.Admin.Timeout = 3 * time.Second
|
||||
|
||||
c.Net.MaxOpenRequests = 5
|
||||
|
@ -347,6 +450,7 @@ func NewConfig() *Config {
|
|||
c.Net.ReadTimeout = 30 * time.Second
|
||||
c.Net.WriteTimeout = 30 * time.Second
|
||||
c.Net.SASL.Handshake = true
|
||||
c.Net.SASL.Version = SASLHandshakeV0
|
||||
|
||||
c.Metadata.Retry.Max = 3
|
||||
c.Metadata.Retry.Backoff = 250 * time.Millisecond
|
||||
|
@ -368,7 +472,8 @@ func NewConfig() *Config {
|
|||
c.Consumer.MaxWaitTime = 250 * time.Millisecond
|
||||
c.Consumer.MaxProcessingTime = 100 * time.Millisecond
|
||||
c.Consumer.Return.Errors = false
|
||||
c.Consumer.Offsets.CommitInterval = 1 * time.Second
|
||||
c.Consumer.Offsets.AutoCommit.Enable = true
|
||||
c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
|
||||
c.Consumer.Offsets.Initial = OffsetNewest
|
||||
c.Consumer.Offsets.Retry.Max = 3
|
||||
|
||||
|
@ -381,7 +486,7 @@ func NewConfig() *Config {
|
|||
|
||||
c.ClientID = defaultClientID
|
||||
c.ChannelBufferSize = 256
|
||||
c.Version = MinVersion
|
||||
c.Version = DefaultVersion
|
||||
c.MetricRegistry = metrics.NewRegistry()
|
||||
|
||||
return c
|
||||
|
@ -391,10 +496,10 @@ func NewConfig() *Config {
|
|||
// ConfigurationError if the specified values don't make sense.
|
||||
func (c *Config) Validate() error {
|
||||
// some configuration values should be warned on but not fail completely, do those first
|
||||
if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil {
|
||||
if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
|
||||
Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
|
||||
}
|
||||
if c.Net.SASL.Enable == false {
|
||||
if !c.Net.SASL.Enable {
|
||||
if c.Net.SASL.User != "" {
|
||||
Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
|
||||
}
|
||||
|
@ -449,12 +554,65 @@ func (c *Config) Validate() error {
|
|||
return ConfigurationError("Net.ReadTimeout must be > 0")
|
||||
case c.Net.WriteTimeout <= 0:
|
||||
return ConfigurationError("Net.WriteTimeout must be > 0")
|
||||
case c.Net.KeepAlive < 0:
|
||||
return ConfigurationError("Net.KeepAlive must be >= 0")
|
||||
case c.Net.SASL.Enable == true && c.Net.SASL.User == "":
|
||||
return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
|
||||
case c.Net.SASL.Enable == true && c.Net.SASL.Password == "":
|
||||
return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
|
||||
case c.Net.SASL.Enable:
|
||||
if c.Net.SASL.Mechanism == "" {
|
||||
c.Net.SASL.Mechanism = SASLTypePlaintext
|
||||
}
|
||||
|
||||
switch c.Net.SASL.Mechanism {
|
||||
case SASLTypePlaintext:
|
||||
if c.Net.SASL.User == "" {
|
||||
return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
|
||||
}
|
||||
if c.Net.SASL.Password == "" {
|
||||
return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
|
||||
}
|
||||
case SASLTypeOAuth:
|
||||
if c.Net.SASL.TokenProvider == nil {
|
||||
return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider")
|
||||
}
|
||||
case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
|
||||
if c.Net.SASL.User == "" {
|
||||
return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
|
||||
}
|
||||
if c.Net.SASL.Password == "" {
|
||||
return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
|
||||
}
|
||||
if c.Net.SASL.SCRAMClientGeneratorFunc == nil {
|
||||
return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc")
|
||||
}
|
||||
case SASLTypeGSSAPI:
|
||||
if c.Net.SASL.GSSAPI.ServiceName == "" {
|
||||
return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used")
|
||||
}
|
||||
|
||||
if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH {
|
||||
if c.Net.SASL.GSSAPI.Password == "" {
|
||||
return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " +
|
||||
"mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH")
|
||||
}
|
||||
} else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH {
|
||||
if c.Net.SASL.GSSAPI.KeyTabPath == "" {
|
||||
return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" +
|
||||
" and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH")
|
||||
}
|
||||
} else {
|
||||
return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH")
|
||||
}
|
||||
if c.Net.SASL.GSSAPI.KerberosConfigPath == "" {
|
||||
return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used")
|
||||
}
|
||||
if c.Net.SASL.GSSAPI.Username == "" {
|
||||
return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used")
|
||||
}
|
||||
if c.Net.SASL.GSSAPI.Realm == "" {
|
||||
return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used")
|
||||
}
|
||||
default:
|
||||
msg := fmt.Sprintf("The SASL mechanism configuration is invalid. Possible values are `%s`, `%s`, `%s`, `%s` and `%s`",
|
||||
SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI)
|
||||
return ConfigurationError(msg)
|
||||
}
|
||||
}
|
||||
|
||||
// validate the Admin values
|
||||
|
@ -511,6 +669,25 @@ func (c *Config) Validate() error {
|
|||
}
|
||||
}
|
||||
|
||||
if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) {
|
||||
return ConfigurationError("zstd compression requires Version >= V2_1_0_0")
|
||||
}
|
||||
|
||||
if c.Producer.Idempotent {
|
||||
if !c.Version.IsAtLeast(V0_11_0_0) {
|
||||
return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
|
||||
}
|
||||
if c.Producer.Retry.Max == 0 {
|
||||
return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1")
|
||||
}
|
||||
if c.Producer.RequiredAcks != WaitForAll {
|
||||
return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll")
|
||||
}
|
||||
if c.Net.MaxOpenRequests > 1 {
|
||||
return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1")
|
||||
}
|
||||
}
|
||||
|
||||
// validate the Consumer values
|
||||
switch {
|
||||
case c.Consumer.Fetch.Min <= 0:
|
||||
|
@ -525,12 +702,24 @@ func (c *Config) Validate() error {
|
|||
return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
|
||||
case c.Consumer.Retry.Backoff < 0:
|
||||
return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
|
||||
case c.Consumer.Offsets.CommitInterval <= 0:
|
||||
return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
|
||||
case c.Consumer.Offsets.AutoCommit.Interval <= 0:
|
||||
return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0")
|
||||
case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
|
||||
return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
|
||||
case c.Consumer.Offsets.Retry.Max < 0:
|
||||
return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
|
||||
case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted:
|
||||
return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted")
|
||||
}
|
||||
|
||||
if c.Consumer.Offsets.CommitInterval != 0 {
|
||||
Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" +
|
||||
" and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored")
|
||||
}
|
||||
|
||||
// validate IsolationLevel
|
||||
if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) {
|
||||
return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0")
|
||||
}
|
||||
|
||||
// validate the Consumer Group values
|
||||
|
@ -561,3 +750,16 @@ func (c *Config) Validate() error {
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Config) getDialer() proxy.Dialer {
|
||||
if c.Net.Proxy.Enable {
|
||||
Logger.Printf("using proxy %s", c.Net.Proxy.Dialer)
|
||||
return c.Net.Proxy.Dialer
|
||||
} else {
|
||||
return &net.Dialer{
|
||||
Timeout: c.Net.DialTimeout,
|
||||
KeepAlive: c.Net.KeepAlive,
|
||||
LocalAddr: c.Net.LocalAddr,
|
||||
}
|
||||
}
|
||||
}
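A short sketch of wiring a custom dialer through this path; the SOCKS5 address is a placeholder, and golang.org/x/net/proxy supplies the Dialer implementation (log and sarama imports assumed):

	dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}

	cfg := sarama.NewConfig()
	cfg.Net.Proxy.Enable = true
	cfg.Net.Proxy.Dialer = dialer // getDialer() now returns this instead of a net.Dialer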
|
||||
|
|
|
@ -1,15 +1,18 @@
|
|||
package sarama
|
||||
|
||||
// ConfigResourceType is a type for resources that have configs.
|
||||
type ConfigResourceType int8
|
||||
|
||||
// Taken from :
|
||||
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes
|
||||
// Taken from:
|
||||
// https://github.com/apache/kafka/blob/ed7c071e07f1f90e4c2895582f61ca090ced3c42/clients/src/main/java/org/apache/kafka/common/config/ConfigResource.java#L32-L55
|
||||
|
||||
const (
|
||||
// UnknownResource constant type
|
||||
UnknownResource ConfigResourceType = 0
|
||||
AnyResource ConfigResourceType = 1
|
||||
TopicResource ConfigResourceType = 2
|
||||
GroupResource ConfigResourceType = 3
|
||||
ClusterResource ConfigResourceType = 4
|
||||
BrokerResource ConfigResourceType = 5
|
||||
// TopicResource constant type
|
||||
TopicResource ConfigResourceType = 2
|
||||
// BrokerResource constant type
|
||||
BrokerResource ConfigResourceType = 4
|
||||
// BrokerLoggerResource constant type
|
||||
BrokerLoggerResource ConfigResourceType = 8
|
||||
)
|
||||
|
|
|
@ -3,20 +3,24 @@ package sarama
|
|||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
)
|
||||
|
||||
// ConsumerMessage encapsulates a Kafka message returned by the consumer.
|
||||
type ConsumerMessage struct {
|
||||
Key, Value []byte
|
||||
Topic string
|
||||
Partition int32
|
||||
Offset int64
|
||||
Headers []*RecordHeader // only set if kafka is version 0.11+
|
||||
Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
|
||||
BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
|
||||
Headers []*RecordHeader // only set if kafka is version 0.11+
|
||||
|
||||
Key, Value []byte
|
||||
Topic string
|
||||
Partition int32
|
||||
Offset int64
|
||||
}
|
||||
|
||||
// ConsumerError is what is provided to the user when an error occurs.
|
||||
|
@ -31,6 +35,10 @@ func (ce ConsumerError) Error() string {
|
|||
return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
|
||||
}
|
||||
|
||||
func (ce ConsumerError) Unwrap() error {
|
||||
return ce.Err
|
||||
}
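With Unwrap in place, the Go 1.13 error helpers can see through ConsumerError. A small sketch of handling a returned err (the topic name and wrapped error are illustrative; errors, log and sarama imports assumed):

	var ce sarama.ConsumerError
	if errors.As(err, &ce) {
		log.Printf("consume failed on %s/%d: %v", ce.Topic, ce.Partition, ce.Err)
	}
	if errors.Is(err, sarama.ErrOffsetOutOfRange) {
		// react to the specific broker error wrapped inside the ConsumerError
	}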
|
||||
|
||||
// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
|
||||
// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
|
||||
// when stopping.
|
||||
|
@ -43,13 +51,7 @@ func (ce ConsumerErrors) Error() string {
|
|||
// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
|
||||
// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of
|
||||
// scope.
|
||||
//
|
||||
// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
|
||||
// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
|
||||
// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
|
||||
// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
|
||||
type Consumer interface {
|
||||
|
||||
// Topics returns the set of available topics as retrieved from the cluster
|
||||
// metadata. This method is the same as Client.Topics(), and is provided for
|
||||
// convenience.
|
||||
|
@ -75,13 +77,11 @@ type Consumer interface {
|
|||
}
|
||||
|
||||
type consumer struct {
|
||||
client Client
|
||||
conf *Config
|
||||
ownClient bool
|
||||
|
||||
lock sync.Mutex
|
||||
conf *Config
|
||||
children map[string]map[int32]*partitionConsumer
|
||||
brokerConsumers map[*Broker]*brokerConsumer
|
||||
client Client
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
// NewConsumer creates a new consumer using the given broker addresses and configuration.
|
||||
|
@ -90,18 +90,19 @@ func NewConsumer(addrs []string, config *Config) (Consumer, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c, err := NewConsumerFromClient(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.(*consumer).ownClient = true
|
||||
return c, nil
|
||||
return newConsumer(client)
|
||||
}
|
||||
|
||||
// NewConsumerFromClient creates a new consumer using the given client. It is still
|
||||
// necessary to call Close() on the underlying client when shutting down this consumer.
|
||||
func NewConsumerFromClient(client Client) (Consumer, error) {
|
||||
// For clients passed in by the client, ensure we don't
|
||||
// call Close() on it.
|
||||
cli := &nopCloserClient{client}
|
||||
return newConsumer(cli)
|
||||
}
|
||||
|
||||
func newConsumer(client Client) (Consumer, error) {
|
||||
// Check that we are not dealing with a closed Client before processing any other arguments
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
|
@ -118,10 +119,7 @@ func NewConsumerFromClient(client Client) (Consumer, error) {
|
|||
}
|
||||
|
||||
func (c *consumer) Close() error {
|
||||
if c.ownClient {
|
||||
return c.client.Close()
|
||||
}
|
||||
return nil
|
||||
return c.client.Close()
|
||||
}
|
||||
|
||||
func (c *consumer) Topics() ([]string, error) {
|
||||
|
@ -261,12 +259,11 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
|
|||
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
|
||||
//
|
||||
// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
|
||||
// consumer tear-down & return imediately. Continue to loop, servicing the Messages channel until the teardown process
|
||||
// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
|
||||
// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
|
||||
// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
|
||||
// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
|
||||
type PartitionConsumer interface {
|
||||
|
||||
// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
|
||||
// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
|
||||
// function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
|
||||
|
@ -298,22 +295,22 @@ type PartitionConsumer interface {
|
|||
|
||||
type partitionConsumer struct {
|
||||
highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
consumer *consumer
|
||||
conf *Config
|
||||
topic string
|
||||
partition int32
|
||||
|
||||
consumer *consumer
|
||||
conf *Config
|
||||
broker *brokerConsumer
|
||||
messages chan *ConsumerMessage
|
||||
errors chan *ConsumerError
|
||||
feeder chan *FetchResponse
|
||||
|
||||
trigger, dying chan none
|
||||
responseResult error
|
||||
closeOnce sync.Once
|
||||
|
||||
fetchSize int32
|
||||
offset int64
|
||||
topic string
|
||||
partition int32
|
||||
responseResult error
|
||||
fetchSize int32
|
||||
offset int64
|
||||
retries int32
|
||||
}
|
||||
|
||||
var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
|
||||
|
@ -332,12 +329,20 @@ func (child *partitionConsumer) sendError(err error) {
|
|||
}
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) computeBackoff() time.Duration {
|
||||
if child.conf.Consumer.Retry.BackoffFunc != nil {
|
||||
retries := atomic.AddInt32(&child.retries, 1)
|
||||
return child.conf.Consumer.Retry.BackoffFunc(int(retries))
|
||||
}
|
||||
return child.conf.Consumer.Retry.Backoff
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) dispatcher() {
|
||||
for range child.trigger {
|
||||
select {
|
||||
case <-child.dying:
|
||||
close(child.trigger)
|
||||
case <-time.After(child.conf.Consumer.Retry.Backoff):
|
||||
case <-time.After(child.computeBackoff()):
|
||||
if child.broker != nil {
|
||||
child.consumer.unrefBrokerConsumer(child.broker)
|
||||
child.broker = nil
|
||||
|
@ -421,19 +426,13 @@ func (child *partitionConsumer) AsyncClose() {
|
|||
func (child *partitionConsumer) Close() error {
|
||||
child.AsyncClose()
|
||||
|
||||
go withRecover(func() {
|
||||
for range child.messages {
|
||||
// drain
|
||||
}
|
||||
})
|
||||
|
||||
var errors ConsumerErrors
|
||||
var consumerErrors ConsumerErrors
|
||||
for err := range child.errors {
|
||||
errors = append(errors, err)
|
||||
consumerErrors = append(consumerErrors, err)
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return errors
|
||||
if len(consumerErrors) > 0 {
|
||||
return consumerErrors
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -451,17 +450,32 @@ feederLoop:
|
|||
for response := range child.feeder {
|
||||
msgs, child.responseResult = child.parseResponse(response)
|
||||
|
||||
if child.responseResult == nil {
|
||||
atomic.StoreInt32(&child.retries, 0)
|
||||
}
|
||||
|
||||
for i, msg := range msgs {
|
||||
for _, interceptor := range child.conf.Consumer.Interceptors {
|
||||
msg.safelyApplyInterceptor(interceptor)
|
||||
}
|
||||
messageSelect:
|
||||
select {
|
||||
case <-child.dying:
|
||||
child.broker.acks.Done()
|
||||
continue feederLoop
|
||||
case child.messages <- msg:
|
||||
firstAttempt = true
|
||||
case <-expiryTicker.C:
|
||||
if !firstAttempt {
|
||||
child.responseResult = errTimedOut
|
||||
child.broker.acks.Done()
|
||||
remainingLoop:
|
||||
for _, msg = range msgs[i:] {
|
||||
child.messages <- msg
|
||||
select {
|
||||
case child.messages <- msg:
|
||||
case <-child.dying:
|
||||
break remainingLoop
|
||||
}
|
||||
}
|
||||
child.broker.input <- child
|
||||
continue feederLoop
|
||||
|
@ -487,9 +501,13 @@ func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMe
|
|||
for _, msgBlock := range msgSet.Messages {
|
||||
for _, msg := range msgBlock.Messages() {
|
||||
offset := msg.Offset
|
||||
timestamp := msg.Msg.Timestamp
|
||||
if msg.Msg.Version >= 1 {
|
||||
baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
|
||||
offset += baseOffset
|
||||
if msg.Msg.LogAppendTime {
|
||||
timestamp = msgBlock.Msg.Timestamp
|
||||
}
|
||||
}
|
||||
if offset < child.offset {
|
||||
continue
|
||||
|
@ -500,43 +518,65 @@ func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMe
|
|||
Key: msg.Msg.Key,
|
||||
Value: msg.Msg.Value,
|
||||
Offset: offset,
|
||||
Timestamp: msg.Msg.Timestamp,
|
||||
Timestamp: timestamp,
|
||||
BlockTimestamp: msgBlock.Msg.Timestamp,
|
||||
})
|
||||
child.offset = offset + 1
|
||||
}
|
||||
}
|
||||
if len(messages) == 0 {
|
||||
return nil, ErrIncompleteResponse
|
||||
child.offset++
|
||||
}
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
|
||||
var messages []*ConsumerMessage
|
||||
messages := make([]*ConsumerMessage, 0, len(batch.Records))
|
||||
|
||||
for _, rec := range batch.Records {
|
||||
offset := batch.FirstOffset + rec.OffsetDelta
|
||||
if offset < child.offset {
|
||||
continue
|
||||
}
|
||||
timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta)
|
||||
if batch.LogAppendTime {
|
||||
timestamp = batch.MaxTimestamp
|
||||
}
|
||||
messages = append(messages, &ConsumerMessage{
|
||||
Topic: child.topic,
|
||||
Partition: child.partition,
|
||||
Key: rec.Key,
|
||||
Value: rec.Value,
|
||||
Offset: offset,
|
||||
Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta),
|
||||
Timestamp: timestamp,
|
||||
Headers: rec.Headers,
|
||||
})
|
||||
child.offset = offset + 1
|
||||
}
|
||||
if len(messages) == 0 {
|
||||
child.offset += 1
|
||||
child.offset++
|
||||
}
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
|
||||
var (
|
||||
metricRegistry = child.conf.MetricRegistry
|
||||
consumerBatchSizeMetric metrics.Histogram
|
||||
)
|
||||
|
||||
if metricRegistry != nil {
|
||||
consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry)
|
||||
}
|
||||
|
||||
// If request was throttled and empty we log and return without error
|
||||
if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 {
|
||||
Logger.Printf(
|
||||
"consumer/broker/%d FetchResponse throttled %v\n",
|
||||
child.broker.broker.ID(), response.ThrottleTime)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
block := response.GetBlock(child.topic, child.partition)
|
||||
if block == nil {
|
||||
return nil, ErrIncompleteResponse
|
||||
|
@ -550,6 +590,9 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
consumerBatchSizeMetric.Update(int64(nRecs))
|
||||
|
||||
if nRecs == 0 {
|
||||
partialTrailingMessage, err := block.isPartial()
|
||||
if err != nil {
|
||||
|
@ -564,6 +607,10 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
|
|||
child.offset++ // skip this one so we can keep processing future messages
|
||||
} else {
|
||||
child.fetchSize *= 2
|
||||
// check int32 overflow
|
||||
if child.fetchSize < 0 {
|
||||
child.fetchSize = math.MaxInt32
|
||||
}
|
||||
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
|
||||
child.fetchSize = child.conf.Consumer.Fetch.Max
|
||||
}
|
||||
|
@ -577,7 +624,13 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
|
|||
child.fetchSize = child.conf.Consumer.Fetch.Default
|
||||
atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
|
||||
|
||||
messages := []*ConsumerMessage{}
|
||||
// abortedProducerIDs contains the producerIDs whose messages should be ignored as uncommitted
|
||||
// - producerIDs are added when the partitionConsumer iterates over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset)
|
||||
// - producerIDs are removed when the partitionConsumer iterates over an abort controlRecord, meaning the aborted transaction for this producer is over
|
||||
abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
|
||||
abortedTransactions := block.getAbortedTransactions()
|
||||
|
||||
var messages []*ConsumerMessage
|
||||
for _, records := range block.RecordsSet {
|
||||
switch records.recordsType {
|
||||
case legacyRecords:
|
||||
|
@ -588,13 +641,55 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
|
|||
|
||||
messages = append(messages, messageSetMessages...)
|
||||
case defaultRecords:
|
||||
// Consume remaining abortedTransactions up to the last offset of the current batch
|
||||
for _, txn := range abortedTransactions {
|
||||
if txn.FirstOffset > records.RecordBatch.LastOffset() {
|
||||
break
|
||||
}
|
||||
abortedProducerIDs[txn.ProducerID] = struct{}{}
|
||||
// Pop abortedTransactions so that we never add it again
|
||||
abortedTransactions = abortedTransactions[1:]
|
||||
}
|
||||
|
||||
recordBatchMessages, err := child.parseRecords(records.RecordBatch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if control, err := records.isControl(); err != nil || control {
|
||||
|
||||
// Parse and commit offset but do not expose messages that are:
|
||||
// - control records
|
||||
// - part of an aborted transaction when set to `ReadCommitted`
|
||||
|
||||
// control record
|
||||
isControl, err := records.isControl()
|
||||
if err != nil {
|
||||
// I don't know why there is this continue in case of error to begin with
|
||||
// Safe bet is to ignore control messages if ReadUncommitted
|
||||
// and block on them in case of error and ReadCommitted
|
||||
if child.conf.Consumer.IsolationLevel == ReadCommitted {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if isControl {
|
||||
controlRecord, err := records.getControlRecord()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if controlRecord.Type == ControlRecordAbort {
|
||||
delete(abortedProducerIDs, records.RecordBatch.ProducerID)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// filter aborted transactions
|
||||
if child.conf.Consumer.IsolationLevel == ReadCommitted {
|
||||
_, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID]
|
||||
if records.RecordBatch.IsTransactional && isAborted {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
messages = append(messages, recordBatchMessages...)
|
||||
default:
|
||||
|
@ -605,15 +700,13 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
|
|||
return messages, nil
|
||||
}
|
||||
|
||||
// brokerConsumer
|
||||
|
||||
type brokerConsumer struct {
|
||||
consumer *consumer
|
||||
broker *Broker
|
||||
input chan *partitionConsumer
|
||||
newSubscriptions chan []*partitionConsumer
|
||||
wait chan none
|
||||
subscriptions map[*partitionConsumer]none
|
||||
wait chan none
|
||||
acks sync.WaitGroup
|
||||
refs int
|
||||
}
|
||||
|
@ -635,14 +728,14 @@ func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
|
|||
return bc
|
||||
}
|
||||
|
||||
// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
|
||||
// goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks
|
||||
// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
|
||||
// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available,
|
||||
// so the main goroutine can block waiting for work if it has none.
|
||||
func (bc *brokerConsumer) subscriptionManager() {
|
||||
var buffer []*partitionConsumer
|
||||
|
||||
// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
|
||||
// goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks
|
||||
// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
|
||||
// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
|
||||
// so the main goroutine can block waiting for work if it has none.
|
||||
for {
|
||||
if len(buffer) > 0 {
|
||||
select {
|
||||
|
@ -675,10 +768,10 @@ done:
|
|||
close(bc.newSubscriptions)
|
||||
}
|
||||
|
||||
//subscriptionConsumer ensures we will get nil right away if no new subscriptions is available
|
||||
func (bc *brokerConsumer) subscriptionConsumer() {
|
||||
<-bc.wait // wait for our first piece of work
|
||||
|
||||
// the subscriptionConsumer ensures we will get nil right away if no new subscriptions are available
|
||||
for newSubscriptions := range bc.newSubscriptions {
|
||||
bc.updateSubscriptions(newSubscriptions)
|
||||
|
||||
|
@ -719,20 +812,20 @@ func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsu
|
|||
close(child.trigger)
|
||||
delete(bc.subscriptions, child)
|
||||
default:
|
||||
break
|
||||
// no-op
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
|
||||
func (bc *brokerConsumer) handleResponses() {
|
||||
// handles the response codes left for us by our subscriptions, and abandons ones that have been closed
|
||||
for child := range bc.subscriptions {
|
||||
result := child.responseResult
|
||||
child.responseResult = nil
|
||||
|
||||
switch result {
|
||||
case nil:
|
||||
break
|
||||
// no-op
|
||||
case errTimedOut:
|
||||
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
|
||||
bc.broker.ID(), child.topic, child.partition)
|
||||
|
@ -787,6 +880,9 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
|
|||
MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
|
||||
MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
|
||||
}
|
||||
if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) {
|
||||
request.Version = 1
|
||||
}
|
||||
if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
|
||||
request.Version = 2
|
||||
}
|
||||
|
@ -796,7 +892,22 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
|
|||
}
|
||||
if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
|
||||
request.Version = 4
|
||||
request.Isolation = ReadUncommitted // We don't support yet transactions.
|
||||
request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
|
||||
}
|
||||
if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) {
|
||||
request.Version = 7
|
||||
// We do not currently implement KIP-227 FetchSessions. Setting the id to 0
|
||||
// and the epoch to -1 tells the broker not to generate a session ID we're going
|
||||
// to just ignore anyway.
|
||||
request.SessionID = 0
|
||||
request.SessionEpoch = -1
|
||||
}
|
||||
if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) {
|
||||
request.Version = 10
|
||||
}
|
||||
if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) {
|
||||
request.Version = 11
|
||||
request.RackID = bc.consumer.conf.RackID
|
||||
}
|
||||
|
||||
for child := range bc.subscriptions {
|
||||
|
|
|
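With the change above, the fetch request carries the configured isolation level instead of hard-coding ReadUncommitted. A hedged sketch of how a consumer opts into committed-only reads (the broker address is a placeholder):

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_11_0_0                     // transactional fetches need protocol >= 0.11
	cfg.Consumer.IsolationLevel = sarama.ReadCommitted // skip records from aborted transactions

	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()
}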
@ -33,11 +33,14 @@ type ConsumerGroup interface {
|
|||
// to allow the user to perform any final tasks before a rebalance.
|
||||
// 6. Finally, marked offsets are committed one last time before claims are released.
|
||||
//
|
||||
// Please note, that once a relance is triggered, sessions must be completed within
|
||||
// Please note, that once a rebalance is triggered, sessions must be completed within
|
||||
// Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit
|
||||
// as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
|
||||
// is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
|
||||
// commit failures.
|
||||
// This method should be called inside an infinite loop; when a
|
||||
// server-side rebalance happens, the consumer session will need to be
|
||||
// recreated to get the new claims.
|
||||
Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
|
||||
|
||||
// Errors returns a read channel of errors that occurred during the consumer life-cycle.
|
||||
|
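The doc comment on Consume above prescribes calling it in a loop so that a fresh session is created after every rebalance. A minimal self-contained sketch of that pattern (group ID, topic, and broker address are placeholders):

package main

import (
	"context"
	"log"

	"github.com/Shopify/sarama"
)

// noopHandler satisfies ConsumerGroupHandler and just marks every message.
type noopHandler struct{}

func (noopHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (noopHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
func (noopHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		sess.MarkMessage(msg, "") // mark as processed; flushed by the offset manager
	}
	return nil
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_0_0_0 // consumer groups need at least V0_10_2_0

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()

	ctx := context.Background()
	for {
		// Consume blocks for the lifetime of one session and returns when a
		// server-side rebalance happens, so we simply loop and re-join.
		if err := group.Consume(ctx, []string{"example-topic"}, noopHandler{}); err != nil {
			log.Fatal(err)
		}
		if ctx.Err() != nil {
			return // context cancelled, stop re-joining
		}
	}
}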
@ -52,8 +55,7 @@ type ConsumerGroup interface {
|
|||
}
|
||||
|
||||
type consumerGroup struct {
|
||||
client Client
|
||||
ownClient bool
|
||||
client Client
|
||||
|
||||
config *Config
|
||||
consumer Consumer
|
||||
|
@ -64,6 +66,8 @@ type consumerGroup struct {
|
|||
lock sync.Mutex
|
||||
closed chan none
|
||||
closeOnce sync.Once
|
||||
|
||||
userData []byte
|
||||
}
|
||||
|
||||
// NewConsumerGroup creates a new consumer group the given broker addresses and configuration.
|
||||
|
@ -73,20 +77,24 @@ func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerG
|
|||
return nil, err
|
||||
}
|
||||
|
||||
c, err := NewConsumerGroupFromClient(groupID, client)
|
||||
c, err := newConsumerGroup(groupID, client)
|
||||
if err != nil {
|
||||
_ = client.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.(*consumerGroup).ownClient = true
|
||||
return c, nil
|
||||
return c, err
|
||||
}
|
||||
|
||||
// NewConsumerFromClient creates a new consumer group using the given client. It is still
|
||||
// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
|
||||
// necessary to call Close() on the underlying client when shutting down this consumer.
|
||||
// PLEASE NOTE: consumer groups can only re-use but not share clients.
|
||||
func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
|
||||
// For clients passed in by the client, ensure we don't
|
||||
// call Close() on it.
|
||||
cli := &nopCloserClient{client}
|
||||
return newConsumerGroup(groupID, cli)
|
||||
}
|
||||
|
||||
func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) {
|
||||
config := client.Config()
|
||||
if !config.Version.IsAtLeast(V0_10_2_0) {
|
||||
return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
|
||||
|
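After this change a consumer group built from a caller-supplied client wraps it in a nopCloserClient, so Close() on the group no longer closes the shared client. A hedged fragment contrasting the two construction paths (broker address and group ID are placeholders; imports as in the previous sketch):

cfg := sarama.NewConfig()
cfg.Version = sarama.V2_0_0_0

// Path 1: the group owns its client and closes it on group.Close().
group1, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
if err != nil {
	log.Fatal(err)
}
defer group1.Close()

// Path 2: the caller keeps ownership of the client and must close it itself.
client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
if err != nil {
	log.Fatal(err)
}
defer client.Close()

group2, err := sarama.NewConsumerGroupFromClient("example-group", client)
if err != nil {
	log.Fatal(err)
}
defer group2.Close() // leaves the shared client open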
@ -115,9 +123,6 @@ func (c *consumerGroup) Close() (err error) {
|
|||
c.closeOnce.Do(func() {
|
||||
close(c.closed)
|
||||
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// leave group
|
||||
if e := c.leave(); e != nil {
|
||||
err = e
|
||||
|
@ -131,10 +136,8 @@ func (c *consumerGroup) Close() (err error) {
|
|||
err = e
|
||||
}
|
||||
|
||||
if c.ownClient {
|
||||
if e := c.client.Close(); e != nil {
|
||||
err = e
|
||||
}
|
||||
if e := c.client.Close(); e != nil {
|
||||
err = e
|
||||
}
|
||||
})
|
||||
return
|
||||
|
@ -162,20 +165,19 @@ func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler Co
|
|||
return err
|
||||
}
|
||||
|
||||
// Get coordinator
|
||||
coordinator, err := c.client.Coordinator(c.groupID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Init session
|
||||
sess, err := c.newSession(ctx, coordinator, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
|
||||
sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
|
||||
if err == ErrClosedClient {
|
||||
return ErrClosedConsumerGroup
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// loop check topic partition numbers changed
|
||||
// will trigger rebalance when any topic partitions number had changed
|
||||
// avoid Consume function called again that will generate more than loopCheckPartitionNumbers coroutine
|
||||
go c.loopCheckPartitionNumbers(topics, sess)
|
||||
|
||||
// Wait for session exit signal
|
||||
<-sess.ctx.Done()
|
||||
|
||||
|
@ -183,7 +185,33 @@ func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler Co
|
|||
return sess.release(true)
|
||||
}
|
||||
|
||||
func (c *consumerGroup) newSession(ctx context.Context, coordinator *Broker, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
|
||||
func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) {
|
||||
select {
|
||||
case <-c.closed:
|
||||
return nil, ErrClosedConsumerGroup
|
||||
case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
|
||||
}
|
||||
|
||||
if refreshCoordinator {
|
||||
err := c.client.RefreshCoordinator(c.groupID)
|
||||
if err != nil {
|
||||
return c.retryNewSession(ctx, topics, handler, retries, true)
|
||||
}
|
||||
}
|
||||
|
||||
return c.newSession(ctx, topics, handler, retries-1)
|
||||
}
|
||||
|
||||
func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
|
||||
coordinator, err := c.client.Coordinator(c.groupID)
|
||||
if err != nil {
|
||||
if retries <= 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c.retryNewSession(ctx, topics, handler, retries, true)
|
||||
}
|
||||
|
||||
// Join consumer group
|
||||
join, err := c.joinGroupRequest(coordinator, topics)
|
||||
if err != nil {
|
||||
|
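retryNewSession centralizes the backoff-and-retry handling that newSession previously inlined; both knobs it reads come from the consumer configuration. A short fragment showing how they are tuned (the values are illustrative, not recommendations):

cfg := sarama.NewConfig()
cfg.Consumer.Group.Rebalance.Retry.Max = 4                   // attempts before newSession gives up
cfg.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second // pause before each retry (and coordinator refresh)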
@ -195,19 +223,19 @@ func (c *consumerGroup) newSession(ctx context.Context, coordinator *Broker, top
|
|||
c.memberID = join.MemberId
|
||||
case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
|
||||
c.memberID = ""
|
||||
return c.newSession(ctx, coordinator, topics, handler, retries)
|
||||
return c.newSession(ctx, topics, handler, retries)
|
||||
case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
|
||||
if retries <= 0 {
|
||||
return nil, join.Err
|
||||
}
|
||||
|
||||
return c.retryNewSession(ctx, topics, handler, retries, true)
|
||||
case ErrRebalanceInProgress: // retry after backoff
|
||||
if retries <= 0 {
|
||||
return nil, join.Err
|
||||
}
|
||||
|
||||
select {
|
||||
case <-c.closed:
|
||||
return nil, ErrClosedConsumerGroup
|
||||
case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
|
||||
}
|
||||
|
||||
return c.newSession(ctx, coordinator, topics, handler, retries-1)
|
||||
return c.retryNewSession(ctx, topics, handler, retries, false)
|
||||
default:
|
||||
return nil, join.Err
|
||||
}
|
||||
|
@ -227,47 +255,48 @@ func (c *consumerGroup) newSession(ctx context.Context, coordinator *Broker, top
|
|||
}
|
||||
|
||||
// Sync consumer group
|
||||
sync, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
|
||||
groupRequest, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
|
||||
if err != nil {
|
||||
_ = coordinator.Close()
|
||||
return nil, err
|
||||
}
|
||||
switch sync.Err {
|
||||
switch groupRequest.Err {
|
||||
case ErrNoError:
|
||||
case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
|
||||
c.memberID = ""
|
||||
return c.newSession(ctx, coordinator, topics, handler, retries)
|
||||
return c.newSession(ctx, topics, handler, retries)
|
||||
case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
|
||||
if retries <= 0 {
|
||||
return nil, groupRequest.Err
|
||||
}
|
||||
|
||||
return c.retryNewSession(ctx, topics, handler, retries, true)
|
||||
case ErrRebalanceInProgress: // retry after backoff
|
||||
if retries <= 0 {
|
||||
return nil, sync.Err
|
||||
return nil, groupRequest.Err
|
||||
}
|
||||
|
||||
select {
|
||||
case <-c.closed:
|
||||
return nil, ErrClosedConsumerGroup
|
||||
case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
|
||||
}
|
||||
|
||||
return c.newSession(ctx, coordinator, topics, handler, retries-1)
|
||||
return c.retryNewSession(ctx, topics, handler, retries, false)
|
||||
default:
|
||||
return nil, sync.Err
|
||||
return nil, groupRequest.Err
|
||||
}
|
||||
|
||||
// Retrieve and sort claims
|
||||
var claims map[string][]int32
|
||||
if len(sync.MemberAssignment) > 0 {
|
||||
members, err := sync.GetMemberAssignment()
|
||||
if len(groupRequest.MemberAssignment) > 0 {
|
||||
members, err := groupRequest.GetMemberAssignment()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
claims = members.Topics
|
||||
c.userData = members.UserData
|
||||
|
||||
for _, partitions := range claims {
|
||||
sort.Sort(int32Slice(partitions))
|
||||
}
|
||||
}
|
||||
|
||||
return newConsumerGroupSession(c, ctx, claims, join.MemberId, join.GenerationId, handler)
|
||||
return newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler)
|
||||
}
|
||||
|
||||
func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) {
|
||||
|
@ -282,9 +311,14 @@ func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (
|
|||
req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond)
|
||||
}
|
||||
|
||||
// use static user-data if configured, otherwise use consumer-group userdata from the last sync
|
||||
userData := c.config.Consumer.Group.Member.UserData
|
||||
if len(userData) == 0 {
|
||||
userData = c.userData
|
||||
}
|
||||
meta := &ConsumerGroupMemberMetadata{
|
||||
Topics: topics,
|
||||
UserData: c.config.Consumer.Group.Member.UserData,
|
||||
UserData: userData,
|
||||
}
|
||||
strategy := c.config.Consumer.Group.Rebalance.Strategy
|
||||
if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
|
||||
|
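The join request now falls back to the user data returned by the last SyncGroup when no static value is configured. Supplying static member metadata is a single config field; a sketch (the payload format is entirely up to the application):

cfg := sarama.NewConfig()
cfg.Consumer.Group.Member.UserData = []byte(`{"rack":"eu-west-1a"}`) // sent with every JoinGroup request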
@ -300,13 +334,17 @@ func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrate
|
|||
MemberId: c.memberID,
|
||||
GenerationId: generationID,
|
||||
}
|
||||
strategy := c.config.Consumer.Group.Rebalance.Strategy
|
||||
for memberID, topics := range plan {
|
||||
err := req.AddGroupAssignmentMember(memberID, &ConsumerGroupMemberAssignment{
|
||||
Topics: topics,
|
||||
})
|
||||
assignment := &ConsumerGroupMemberAssignment{Topics: topics}
|
||||
userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
assignment.UserData = userDataBytes
|
||||
if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return coordinator.SyncGroup(req)
|
||||
}
|
||||
|
@ -341,8 +379,10 @@ func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata)
|
|||
return strategy.Plan(members, topics)
|
||||
}
|
||||
|
||||
// Leaves the cluster, called by Close, protected by lock.
|
||||
// Leaves the cluster, called by Close.
|
||||
func (c *consumerGroup) leave() error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if c.memberID == "" {
|
||||
return nil
|
||||
}
|
||||
|
@ -374,12 +414,6 @@ func (c *consumerGroup) leave() error {
|
|||
}
|
||||
|
||||
func (c *consumerGroup) handleError(err error, topic string, partition int32) {
|
||||
select {
|
||||
case <-c.closed:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 {
|
||||
err = &ConsumerError{
|
||||
Topic: topic,
|
||||
|
@ -388,14 +422,67 @@ func (c *consumerGroup) handleError(err error, topic string, partition int32) {
|
|||
}
|
||||
}
|
||||
|
||||
if c.config.Consumer.Return.Errors {
|
||||
select {
|
||||
case c.errors <- err:
|
||||
default:
|
||||
}
|
||||
} else {
|
||||
if !c.config.Consumer.Return.Errors {
|
||||
Logger.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case <-c.closed:
|
||||
//consumer is closed
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
select {
|
||||
case c.errors <- err:
|
||||
default:
|
||||
// no error listener
|
||||
}
|
||||
}
|
||||
|
||||
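handleError either logs or forwards errors depending on Consumer.Return.Errors; when forwarding is enabled the application has to drain the channel, otherwise errors are dropped by the non-blocking send. A hedged fragment of the consuming side (group construction as in the earlier sketch):

cfg := sarama.NewConfig()
cfg.Version = sarama.V2_0_0_0
cfg.Consumer.Return.Errors = true // deliver errors on group.Errors() instead of logging them

group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
if err != nil {
	log.Fatal(err)
}
defer group.Close()

go func() {
	for err := range group.Errors() {
		log.Println("consumer group error:", err)
	}
}()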
func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) {
|
||||
pause := time.NewTicker(c.config.Metadata.RefreshFrequency)
|
||||
defer session.cancel()
|
||||
defer pause.Stop()
|
||||
var oldTopicToPartitionNum map[string]int
|
||||
var err error
|
||||
if oldTopicToPartitionNum, err = c.topicToPartitionNumbers(topics); err != nil {
|
||||
return
|
||||
}
|
||||
for {
|
||||
if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil {
|
||||
return
|
||||
} else {
|
||||
for topic, num := range oldTopicToPartitionNum {
|
||||
if newTopicToPartitionNum[topic] != num {
|
||||
return // trigger the end of the session on exit
|
||||
}
|
||||
}
|
||||
}
|
||||
select {
|
||||
case <-pause.C:
|
||||
case <-session.ctx.Done():
|
||||
Logger.Printf("loop check partition number coroutine will exit, topics %s", topics)
|
||||
// if session closed by other, should be exited
|
||||
return
|
||||
case <-c.closed:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) {
|
||||
topicToPartitionNum := make(map[string]int, len(topics))
|
||||
for _, topic := range topics {
|
||||
if partitionNum, err := c.client.Partitions(topic); err != nil {
|
||||
Logger.Printf("Consumer Group topic %s get partition number failed %v", topic, err)
|
||||
return nil, err
|
||||
} else {
|
||||
topicToPartitionNum[topic] = len(partitionNum)
|
||||
}
|
||||
}
|
||||
return topicToPartitionNum, nil
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------
|
||||
|
@ -426,6 +513,11 @@ type ConsumerGroupSession interface {
|
|||
// message twice, and your processing should ideally be idempotent.
|
||||
MarkOffset(topic string, partition int32, offset int64, metadata string)
|
||||
|
||||
// Commit the offset to the backend
|
||||
//
|
||||
// Note: calling Commit performs a blocking synchronous operation.
|
||||
Commit()
|
||||
|
||||
// ResetOffset resets to the provided offset, alongside a metadata string that
|
||||
// represents the state of the partition consumer at that point in time. Reset
|
||||
// acts as a counterpart to MarkOffset, the difference being that it allows to
|
||||
|
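The new Commit method gives handlers a way to force a blocking, synchronous flush of marked offsets instead of waiting for the offset manager's periodic commit; it is typically called from inside ConsumeClaim after marking a batch of messages.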
@ -456,7 +548,7 @@ type consumerGroupSession struct {
|
|||
hbDying, hbDead chan none
|
||||
}
|
||||
|
||||
func newConsumerGroupSession(parent *consumerGroup, ctx context.Context, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
|
||||
func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
|
||||
// init offset manager
|
||||
offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client)
|
||||
if err != nil {
|
||||
|
@ -537,6 +629,10 @@ func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset
|
|||
}
|
||||
}
|
||||
|
||||
func (s *consumerGroupSession) Commit() {
|
||||
s.offsets.Commit()
|
||||
}
|
||||
|
||||
func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
|
||||
if pom := s.offsets.findPOM(topic, partition); pom != nil {
|
||||
pom.ResetOffset(offset, metadata)
|
||||
|
@ -595,7 +691,7 @@ func (s *consumerGroupSession) consume(topic string, partition int32) {
|
|||
s.parent.handleError(err, topic, partition)
|
||||
}
|
||||
|
||||
// ensure consumer is clased & drained
|
||||
// ensure consumer is closed & drained
|
||||
claim.AsyncClose()
|
||||
for _, err := range claim.waitClosed() {
|
||||
s.parent.handleError(err, topic, partition)
|
||||
|
@ -613,7 +709,7 @@ func (s *consumerGroupSession) release(withCleanup bool) (err error) {
|
|||
s.releaseOnce.Do(func() {
|
||||
if withCleanup {
|
||||
if e := s.handler.Cleanup(s); e != nil {
|
||||
s.parent.handleError(err, "", -1)
|
||||
s.parent.handleError(e, "", -1)
|
||||
err = e
|
||||
}
|
||||
}
|
||||
|
@ -657,6 +753,12 @@ func (s *consumerGroupSession) heartbeatLoop() {
|
|||
resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID)
|
||||
if err != nil {
|
||||
_ = coordinator.Close()
|
||||
|
||||
if retries <= 0 {
|
||||
s.parent.handleError(err, "", -1)
|
||||
return
|
||||
}
|
||||
|
||||
retries--
|
||||
continue
|
||||
}
|
||||
|
@ -667,7 +769,7 @@ func (s *consumerGroupSession) heartbeatLoop() {
|
|||
case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration:
|
||||
return
|
||||
default:
|
||||
s.parent.handleError(err, "", -1)
|
||||
s.parent.handleError(resp.Err, "", -1)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -691,7 +793,7 @@ type ConsumerGroupHandler interface {
|
|||
// Setup is run at the beginning of a new session, before ConsumeClaim.
|
||||
Setup(ConsumerGroupSession) error
|
||||
|
||||
// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exites
|
||||
// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
|
||||
// but before the offsets are committed for the very last time.
|
||||
Cleanup(ConsumerGroupSession) error
|
||||
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package sarama
|
||||
|
||||
//ConsumerGroupMemberMetadata holds the metadata for a consumer group
|
||||
type ConsumerGroupMemberMetadata struct {
|
||||
Version int16
|
||||
Topics []string
|
||||
|
@ -36,6 +37,7 @@ func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
|
|||
return nil
|
||||
}
|
||||
|
||||
//ConsumerGroupMemberAssignment holds the member assignment for a consumer group
|
||||
type ConsumerGroupMemberAssignment struct {
|
||||
Version int16
|
||||
Topics map[string][]int32
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package sarama
|
||||
|
||||
//ConsumerMetadataRequest is used for metadata requests
|
||||
type ConsumerMetadataRequest struct {
|
||||
ConsumerGroup string
|
||||
}
|
||||
|
@ -28,6 +29,10 @@ func (r *ConsumerMetadataRequest) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
|
||||
return V0_8_2_0
|
||||
}
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"strconv"
|
||||
)
|
||||
|
||||
//ConsumerMetadataResponse holds the response for a consumer group metadata request
|
||||
type ConsumerMetadataResponse struct {
|
||||
Err KError
|
||||
Coordinator *Broker
|
||||
|
@ -72,6 +73,10 @@ func (r *ConsumerMetadataResponse) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
|
||||
return V0_8_2_0
|
||||
}
|
||||
|
|
|
@ -0,0 +1,72 @@
|
|||
package sarama
|
||||
|
||||
//ControlRecordType ...
|
||||
type ControlRecordType int
|
||||
|
||||
const (
|
||||
//ControlRecordAbort is a control record for abort
|
||||
ControlRecordAbort ControlRecordType = iota
|
||||
//ControlRecordCommit is a control record for commit
|
||||
ControlRecordCommit
|
||||
//ControlRecordUnknown is a control record of unknown type
|
||||
ControlRecordUnknown
|
||||
)
|
||||
|
||||
// Control records are returned as a record by fetchRequest
|
||||
// However, unlike "normal" records, they mean nothing application-wise.
|
||||
// They only serve internal logic for supporting transactions.
|
||||
type ControlRecord struct {
|
||||
Version int16
|
||||
CoordinatorEpoch int32
|
||||
Type ControlRecordType
|
||||
}
|
||||
|
||||
func (cr *ControlRecord) decode(key, value packetDecoder) error {
|
||||
var err error
|
||||
cr.Version, err = value.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cr.CoordinatorEpoch, err = value.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// There is a version for the value part AND the key part, and I have no idea if they are supposed to match or not
|
||||
// Either way, all these versions can only be 0 for now
|
||||
cr.Version, err = key.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
recordType, err := key.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch recordType {
|
||||
case 0:
|
||||
cr.Type = ControlRecordAbort
|
||||
case 1:
|
||||
cr.Type = ControlRecordCommit
|
||||
default:
|
||||
// from JAVA implementation:
|
||||
// UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored
|
||||
cr.Type = ControlRecordUnknown
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cr *ControlRecord) encode(key, value packetEncoder) {
|
||||
value.putInt16(cr.Version)
|
||||
value.putInt32(cr.CoordinatorEpoch)
|
||||
key.putInt16(cr.Version)
|
||||
|
||||
switch cr.Type {
|
||||
case ControlRecordAbort:
|
||||
key.putInt16(0)
|
||||
case ControlRecordCommit:
|
||||
key.putInt16(1)
|
||||
}
|
||||
}
|
|
@ -4,6 +4,7 @@ import (
|
|||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type crcPolynomial int8
|
||||
|
@ -13,6 +14,22 @@ const (
|
|||
crcCastagnoli
|
||||
)
|
||||
|
||||
var crc32FieldPool = sync.Pool{}
|
||||
|
||||
func acquireCrc32Field(polynomial crcPolynomial) *crc32Field {
|
||||
val := crc32FieldPool.Get()
|
||||
if val != nil {
|
||||
c := val.(*crc32Field)
|
||||
c.polynomial = polynomial
|
||||
return c
|
||||
}
|
||||
return newCRC32Field(polynomial)
|
||||
}
|
||||
|
||||
func releaseCrc32Field(c *crc32Field) {
|
||||
crc32FieldPool.Put(c)
|
||||
}
|
||||
|
||||
var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
|
||||
|
||||
// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
|
||||
|
|
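The acquire/release helpers above are a plain sync.Pool recycling pattern; the pool deliberately has no New function, so Get() can return nil and the caller falls back to newCRC32Field. The same pattern expressed with only the standard library, for reference:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool sync.Pool // no New func: Get() may return nil, mirroring crc32FieldPool

func acquireBuf() *bytes.Buffer {
	if v := bufPool.Get(); v != nil {
		b := v.(*bytes.Buffer)
		b.Reset() // always clear state left behind by the previous user
		return b
	}
	return new(bytes.Buffer) // pool was empty, allocate a fresh one
}

func releaseBuf(b *bytes.Buffer) {
	bufPool.Put(b)
}

func main() {
	b := acquireBuf()
	b.WriteString("pooled")
	fmt.Println(b.String())
	releaseBuf(b)
}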
|
@ -67,6 +67,10 @@ func (r *CreatePartitionsRequest) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (r *CreatePartitionsRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
|
||||
return V1_0_0_0
|
||||
}
|
||||
|
|
|
@ -1,6 +1,9 @@
|
|||
package sarama
|
||||
|
||||
import "time"
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
type CreatePartitionsResponse struct {
|
||||
ThrottleTime time.Duration
|
||||
|
@ -60,6 +63,10 @@ func (r *CreatePartitionsResponse) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (r *CreatePartitionsResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
|
||||
return V1_0_0_0
|
||||
}
|
||||
|
@ -69,6 +76,14 @@ type TopicPartitionError struct {
|
|||
ErrMsg *string
|
||||
}
|
||||
|
||||
func (t *TopicPartitionError) Error() string {
|
||||
text := t.Err.Error()
|
||||
if t.ErrMsg != nil {
|
||||
text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
|
||||
}
|
||||
return text
|
||||
}
|
||||
|
||||
func (t *TopicPartitionError) encode(pe packetEncoder) error {
|
||||
pe.putInt16(int16(t.Err))
|
||||
|
||||
|
|
|
@ -79,6 +79,10 @@ func (c *CreateTopicsRequest) version() int16 {
|
|||
return c.Version
|
||||
}
|
||||
|
||||
func (r *CreateTopicsRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
|
||||
switch c.Version {
|
||||
case 2:
|
||||
|
|
|
@ -1,6 +1,9 @@
|
|||
package sarama
|
||||
|
||||
import "time"
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
type CreateTopicsResponse struct {
|
||||
Version int16
|
||||
|
@ -67,6 +70,10 @@ func (c *CreateTopicsResponse) version() int16 {
|
|||
return c.Version
|
||||
}
|
||||
|
||||
func (c *CreateTopicsResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
|
||||
switch c.Version {
|
||||
case 2:
|
||||
|
@ -83,6 +90,14 @@ type TopicError struct {
|
|||
ErrMsg *string
|
||||
}
|
||||
|
||||
func (t *TopicError) Error() string {
|
||||
text := t.Err.Error()
|
||||
if t.ErrMsg != nil {
|
||||
text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
|
||||
}
|
||||
return text
|
||||
}
|
||||
|
||||
func (t *TopicError) encode(pe packetEncoder, version int16) error {
|
||||
pe.putInt16(int16(t.Err))
|
||||
|
||||
|
|
|
@ -0,0 +1,63 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
|
||||
snappy "github.com/eapache/go-xerial-snappy"
|
||||
"github.com/pierrec/lz4"
|
||||
)
|
||||
|
||||
var (
|
||||
lz4ReaderPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return lz4.NewReader(nil)
|
||||
},
|
||||
}
|
||||
|
||||
gzipReaderPool sync.Pool
|
||||
)
|
||||
|
||||
func decompress(cc CompressionCodec, data []byte) ([]byte, error) {
|
||||
switch cc {
|
||||
case CompressionNone:
|
||||
return data, nil
|
||||
case CompressionGZIP:
|
||||
var (
|
||||
err error
|
||||
reader *gzip.Reader
|
||||
readerIntf = gzipReaderPool.Get()
|
||||
)
|
||||
if readerIntf != nil {
|
||||
reader = readerIntf.(*gzip.Reader)
|
||||
} else {
|
||||
reader, err = gzip.NewReader(bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
defer gzipReaderPool.Put(reader)
|
||||
|
||||
if err := reader.Reset(bytes.NewReader(data)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ioutil.ReadAll(reader)
|
||||
case CompressionSnappy:
|
||||
return snappy.Decode(data)
|
||||
case CompressionLZ4:
|
||||
reader := lz4ReaderPool.Get().(*lz4.Reader)
|
||||
defer lz4ReaderPool.Put(reader)
|
||||
|
||||
reader.Reset(bytes.NewReader(data))
|
||||
return ioutil.ReadAll(reader)
|
||||
case CompressionZSTD:
|
||||
return zstdDecompress(nil, data)
|
||||
default:
|
||||
return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)}
|
||||
}
|
||||
}
|
|
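decompress is the consumer-side counterpart of the codecs a producer can enable; the codec itself is chosen purely through configuration. A hedged fragment of the producing side (LZ4 shown; it requires a 0.10+ protocol version, and the broker address is a placeholder):

cfg := sarama.NewConfig()
cfg.Version = sarama.V0_10_0_0                   // LZ4 needs the v1+ message format
cfg.Producer.Compression = sarama.CompressionLZ4 // CompressionGZIP, CompressionSnappy, CompressionZSTD also exist
cfg.Producer.Return.Successes = true             // required by the SyncProducer

producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
if err != nil {
	log.Fatal(err)
}
defer producer.Close()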
@ -21,6 +21,10 @@ func (r *DeleteGroupsRequest) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (r *DeleteGroupsRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
|
||||
return V1_1_0_0
|
||||
}
|
||||
|
|
|
@ -65,6 +65,10 @@ func (r *DeleteGroupsResponse) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (r *DeleteGroupsResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
|
||||
return V1_1_0_0
|
||||
}
|
||||
|
|
|
@ -77,6 +77,10 @@ func (d *DeleteRecordsRequest) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (d *DeleteRecordsRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
}
|
||||
|
|
|
@ -80,6 +80,10 @@ func (d *DeleteRecordsResponse) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (d *DeleteRecordsResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
}
|
||||
|
|
|
@ -38,6 +38,10 @@ func (d *DeleteTopicsRequest) version() int16 {
|
|||
return d.Version
|
||||
}
|
||||
|
||||
func (d *DeleteTopicsRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
|
||||
switch d.Version {
|
||||
case 1:
|
||||
|
|
|
@ -68,6 +68,10 @@ func (d *DeleteTopicsResponse) version() int16 {
|
|||
return d.Version
|
||||
}
|
||||
|
||||
func (d *DeleteTopicsResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
|
||||
switch d.Version {
|
||||
case 1:
|
||||
|
|
|
@ -1,15 +1,17 @@
|
|||
package sarama
|
||||
|
||||
type DescribeConfigsRequest struct {
|
||||
Version int16
|
||||
Resources []*ConfigResource
|
||||
IncludeSynonyms bool
|
||||
}
|
||||
|
||||
type ConfigResource struct {
|
||||
Type ConfigResourceType
|
||||
Name string
|
||||
ConfigNames []string
|
||||
}
|
||||
|
||||
type DescribeConfigsRequest struct {
|
||||
Resources []*ConfigResource
|
||||
}
|
||||
|
||||
func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
|
||||
if err := pe.putArrayLength(len(r.Resources)); err != nil {
|
||||
return err
|
||||
|
@ -30,6 +32,10 @@ func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
|
|||
}
|
||||
}
|
||||
|
||||
if r.Version >= 1 {
|
||||
pe.putBool(r.IncludeSynonyms)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -74,6 +80,14 @@ func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err er
|
|||
}
|
||||
r.Resources[i].ConfigNames = cfnames
|
||||
}
|
||||
r.Version = version
|
||||
if r.Version >= 1 {
|
||||
b, err := pd.getBool()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.IncludeSynonyms = b
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -83,9 +97,20 @@ func (r *DescribeConfigsRequest) key() int16 {
|
|||
}
|
||||
|
||||
func (r *DescribeConfigsRequest) version() int16 {
|
||||
return 0
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *DescribeConfigsRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
switch r.Version {
|
||||
case 1:
|
||||
return V1_1_0_0
|
||||
case 2:
|
||||
return V2_0_0_0
|
||||
default:
|
||||
return V0_11_0_0
|
||||
}
|
||||
}
|
||||
|
|
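With the new Version field the same request struct can drive the v1/v2 wire formats, which add config sources and synonyms. A hedged fragment of issuing it against an already-connected *sarama.Broker (the topic name is a placeholder):

request := &sarama.DescribeConfigsRequest{
	Version:         1, // v1 adds the per-entry Source and Synonyms fields
	IncludeSynonyms: true,
	Resources: []*sarama.ConfigResource{
		{
			Type:        sarama.TopicResource,
			Name:        "example-topic",
			ConfigNames: []string{"retention.ms"}, // leave empty to fetch every config entry
		},
	},
}

response, err := broker.DescribeConfigs(request)
if err != nil {
	log.Fatal(err)
}
for _, res := range response.Resources {
	for _, entry := range res.Configs {
		log.Printf("%s = %s (source: %s)", entry.Name, entry.Value, entry.Source)
	}
}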
|
@ -1,8 +1,41 @@
|
|||
package sarama
|
||||
|
||||
import "time"
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ConfigSource int8
|
||||
|
||||
func (s ConfigSource) String() string {
|
||||
switch s {
|
||||
case SourceUnknown:
|
||||
return "Unknown"
|
||||
case SourceTopic:
|
||||
return "Topic"
|
||||
case SourceDynamicBroker:
|
||||
return "DynamicBroker"
|
||||
case SourceDynamicDefaultBroker:
|
||||
return "DynamicDefaultBroker"
|
||||
case SourceStaticBroker:
|
||||
return "StaticBroker"
|
||||
case SourceDefault:
|
||||
return "Default"
|
||||
}
|
||||
return fmt.Sprintf("Source Invalid: %d", int(s))
|
||||
}
|
||||
|
||||
const (
|
||||
SourceUnknown ConfigSource = iota
|
||||
SourceTopic
|
||||
SourceDynamicBroker
|
||||
SourceDynamicDefaultBroker
|
||||
SourceStaticBroker
|
||||
SourceDefault
|
||||
)
|
||||
|
||||
type DescribeConfigsResponse struct {
|
||||
Version int16
|
||||
ThrottleTime time.Duration
|
||||
Resources []*ResourceResponse
|
||||
}
|
||||
|
@ -20,7 +53,15 @@ type ConfigEntry struct {
|
|||
Value string
|
||||
ReadOnly bool
|
||||
Default bool
|
||||
Source ConfigSource
|
||||
Sensitive bool
|
||||
Synonyms []*ConfigSynonym
|
||||
}
|
||||
|
||||
type ConfigSynonym struct {
|
||||
ConfigName string
|
||||
ConfigValue string
|
||||
Source ConfigSource
|
||||
}
|
||||
|
||||
func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
|
||||
|
@ -30,14 +71,16 @@ func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
|
|||
}
|
||||
|
||||
for _, c := range r.Resources {
|
||||
if err = c.encode(pe); err != nil {
|
||||
if err = c.encode(pe, r.Version); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Version = version
|
||||
throttleTime, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -66,14 +109,25 @@ func (r *DescribeConfigsResponse) key() int16 {
|
|||
}
|
||||
|
||||
func (r *DescribeConfigsResponse) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *DescribeConfigsResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
switch r.Version {
|
||||
case 1:
|
||||
return V1_0_0_0
|
||||
case 2:
|
||||
return V2_0_0_0
|
||||
default:
|
||||
return V0_11_0_0
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ResourceResponse) encode(pe packetEncoder) (err error) {
|
||||
func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) {
|
||||
pe.putInt16(r.ErrorCode)
|
||||
|
||||
if err = pe.putString(r.ErrorMsg); err != nil {
|
||||
|
@ -91,7 +145,7 @@ func (r *ResourceResponse) encode(pe packetEncoder) (err error) {
|
|||
}
|
||||
|
||||
for _, c := range r.Configs {
|
||||
if err = c.encode(pe); err != nil {
|
||||
if err = c.encode(pe, version); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -139,7 +193,7 @@ func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (r *ConfigEntry) encode(pe packetEncoder) (err error) {
|
||||
func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) {
|
||||
if err = pe.putString(r.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -149,12 +203,32 @@ func (r *ConfigEntry) encode(pe packetEncoder) (err error) {
|
|||
}
|
||||
|
||||
pe.putBool(r.ReadOnly)
|
||||
pe.putBool(r.Default)
|
||||
pe.putBool(r.Sensitive)
|
||||
|
||||
if version <= 0 {
|
||||
pe.putBool(r.Default)
|
||||
pe.putBool(r.Sensitive)
|
||||
} else {
|
||||
pe.putInt8(int8(r.Source))
|
||||
pe.putBool(r.Sensitive)
|
||||
|
||||
if err := pe.putArrayLength(len(r.Synonyms)); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, c := range r.Synonyms {
|
||||
if err = c.encode(pe, version); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
|
||||
func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
|
||||
if version == 0 {
|
||||
r.Source = SourceUnknown
|
||||
}
|
||||
name, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -173,16 +247,81 @@ func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
|
|||
}
|
||||
r.ReadOnly = read
|
||||
|
||||
de, err := pd.getBool()
|
||||
if err != nil {
|
||||
return err
|
||||
if version == 0 {
|
||||
defaultB, err := pd.getBool()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Default = defaultB
|
||||
if defaultB {
|
||||
r.Source = SourceDefault
|
||||
}
|
||||
} else {
|
||||
source, err := pd.getInt8()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Source = ConfigSource(source)
|
||||
r.Default = r.Source == SourceDefault
|
||||
}
|
||||
r.Default = de
|
||||
|
||||
sensitive, err := pd.getBool()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Sensitive = sensitive
|
||||
|
||||
if version > 0 {
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Synonyms = make([]*ConfigSynonym, n)
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
s := &ConfigSynonym{}
|
||||
if err := s.decode(pd, version); err != nil {
|
||||
return err
|
||||
}
|
||||
r.Synonyms[i] = s
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) {
|
||||
err = pe.putString(c.ConfigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = pe.putString(c.ConfigValue)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pe.putInt8(int8(c.Source))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error {
|
||||
name, err := pd.getString()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
c.ConfigName = name
|
||||
|
||||
value, err := pd.getString()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
c.ConfigValue = value
|
||||
|
||||
source, err := pd.getInt8()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
c.Source = ConfigSource(source)
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -21,6 +21,10 @@ func (r *DescribeGroupsRequest) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
||||
|
|
|
@ -43,6 +43,10 @@ func (r *DescribeGroupsResponse) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
||||
|
|
|
@ -0,0 +1,87 @@
|
|||
package sarama
|
||||
|
||||
// DescribeLogDirsRequest is a describe request to get partitions' log size
|
||||
type DescribeLogDirsRequest struct {
|
||||
// Version 0 and 1 are equal
|
||||
// The version number is bumped to indicate that on quota violation brokers send out responses before throttling.
|
||||
Version int16
|
||||
|
||||
// If this is an empty array, all topics will be queried
|
||||
DescribeTopics []DescribeLogDirsRequestTopic
|
||||
}
|
||||
|
||||
// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic
|
||||
type DescribeLogDirsRequestTopic struct {
|
||||
Topic string
|
||||
PartitionIDs []int32
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error {
|
||||
length := len(r.DescribeTopics)
|
||||
if length == 0 {
|
||||
// In order to query all topics we must send null
|
||||
length = -1
|
||||
}
|
||||
|
||||
if err := pe.putArrayLength(length); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, d := range r.DescribeTopics {
|
||||
if err := pe.putString(d.Topic); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := pe.putInt32Array(d.PartitionIDs); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error {
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n == -1 {
|
||||
n = 0
|
||||
}
|
||||
|
||||
topics := make([]DescribeLogDirsRequestTopic, n)
|
||||
for i := 0; i < n; i++ {
|
||||
topics[i] = DescribeLogDirsRequestTopic{}
|
||||
|
||||
topic, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
topics[i].Topic = topic
|
||||
|
||||
pIDs, err := pd.getInt32Array()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
topics[i].PartitionIDs = pIDs
|
||||
}
|
||||
r.DescribeTopics = topics
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsRequest) key() int16 {
|
||||
return 35
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsRequest) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion {
|
||||
return V1_0_0_0
|
||||
}
|
|
@ -0,0 +1,229 @@
|
|||
package sarama
|
||||
|
||||
import "time"
|
||||
|
||||
type DescribeLogDirsResponse struct {
|
||||
ThrottleTime time.Duration
|
||||
|
||||
// Version 0 and 1 are equal
|
||||
// The version number is bumped to indicate that on quota violation brokers send out responses before throttling.
|
||||
Version int16
|
||||
|
||||
LogDirs []DescribeLogDirsResponseDirMetadata
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
|
||||
|
||||
if err := pe.putArrayLength(len(r.LogDirs)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, dir := range r.LogDirs {
|
||||
if err := dir.encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) error {
|
||||
throttleTime, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
|
||||
|
||||
// Decode array of DescribeLogDirsResponseDirMetadata
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n)
|
||||
for i := 0; i < n; i++ {
|
||||
dir := DescribeLogDirsResponseDirMetadata{}
|
||||
if err := dir.decode(pd, version); err != nil {
|
||||
return err
|
||||
}
|
||||
r.LogDirs[i] = dir
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsResponse) key() int16 {
|
||||
return 35
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsResponse) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion {
|
||||
return V1_0_0_0
|
||||
}
|
||||
|
||||
type DescribeLogDirsResponseDirMetadata struct {
|
||||
ErrorCode KError
|
||||
|
||||
// The absolute log directory path
|
||||
Path string
|
||||
Topics []DescribeLogDirsResponseTopic
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error {
|
||||
pe.putInt16(int16(r.ErrorCode))
|
||||
|
||||
if err := pe.putString(r.Path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := pe.putArrayLength(len(r.Topics)); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, topic := range r.Topics {
|
||||
if err := topic.encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) error {
|
||||
errCode, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.ErrorCode = KError(errCode)
|
||||
|
||||
path, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Path = path
|
||||
|
||||
// Decode array of DescribeLogDirsResponseTopic
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Topics = make([]DescribeLogDirsResponseTopic, n)
|
||||
for i := 0; i < n; i++ {
|
||||
t := DescribeLogDirsResponseTopic{}
|
||||
|
||||
if err := t.decode(pd, version); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Topics[i] = t
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DescribeLogDirsResponseTopic contains a topic's partitions descriptions
|
||||
type DescribeLogDirsResponseTopic struct {
|
||||
Topic string
|
||||
Partitions []DescribeLogDirsResponsePartition
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error {
|
||||
if err := pe.putString(r.Topic); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := pe.putArrayLength(len(r.Partitions)); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, partition := range r.Partitions {
|
||||
if err := partition.encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error {
|
||||
t, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Topic = t
|
||||
|
||||
n, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Partitions = make([]DescribeLogDirsResponsePartition, n)
|
||||
for i := 0; i < n; i++ {
|
||||
p := DescribeLogDirsResponsePartition{}
|
||||
if err := p.decode(pd, version); err != nil {
|
||||
return err
|
||||
}
|
||||
r.Partitions[i] = p
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DescribeLogDirsResponsePartition describes a partition's log directory
|
||||
type DescribeLogDirsResponsePartition struct {
|
||||
PartitionID int32
|
||||
|
||||
// The size of the log segments of the partition in bytes.
|
||||
Size int64
|
||||
|
||||
// The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or
|
||||
// current replica's LEO (if it is the future log for the partition)
|
||||
OffsetLag int64
|
||||
|
||||
// True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of
|
||||
// the replica in the future.
|
||||
IsTemporary bool
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder) error {
|
||||
pe.putInt32(r.PartitionID)
|
||||
pe.putInt64(r.Size)
|
||||
pe.putInt64(r.OffsetLag)
|
||||
pe.putBool(r.IsTemporary)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error {
|
||||
pID, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.PartitionID = pID
|
||||
|
||||
size, err := pd.getInt64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Size = size
|
||||
|
||||
lag, err := pd.getInt64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.OffsetLag = lag
|
||||
|
||||
isTemp, err := pd.getBool()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.IsTemporary = isTemp
|
||||
|
||||
return nil
|
||||
}
|
|
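These two new message types let a client ask a broker for the on-disk size and offset lag of its partition logs. A hedged fragment, assuming an already-connected *sarama.Broker and a DescribeLogDirs helper on the Broker type for sending this request:

request := &sarama.DescribeLogDirsRequest{} // empty DescribeTopics queries every topic on the broker

response, err := broker.DescribeLogDirs(request)
if err != nil {
	log.Fatal(err)
}
for _, dir := range response.LogDirs {
	for _, topic := range dir.Topics {
		for _, p := range topic.Partitions {
			log.Printf("%s %s[%d]: %d bytes (lag %d)", dir.Path, topic.Topic, p.PartitionID, p.Size, p.OffsetLag)
		}
	}
}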
@ -2,7 +2,7 @@ name: sarama
|
|||
|
||||
up:
|
||||
- go:
|
||||
version: '1.11'
|
||||
version: '1.15.2'
|
||||
|
||||
commands:
|
||||
test:
|
||||
|
|
|
@ -0,0 +1,134 @@
|
|||
version: '3.7'
|
||||
services:
|
||||
zookeeper-1:
|
||||
image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-5.5.0}'
|
||||
restart: always
|
||||
environment:
|
||||
ZOOKEEPER_SERVER_ID: '1'
|
||||
ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
|
||||
ZOOKEEPER_CLIENT_PORT: '2181'
|
||||
ZOOKEEPER_PEER_PORT: '2888'
|
||||
ZOOKEEPER_LEADER_PORT: '3888'
|
||||
ZOOKEEPER_INIT_LIMIT: '10'
|
||||
ZOOKEEPER_SYNC_LIMIT: '5'
|
||||
ZOOKEEPER_MAX_CLIENT_CONNS: '0'
|
||||
zookeeper-2:
|
||||
image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-5.5.0}'
|
||||
restart: always
|
||||
environment:
|
||||
ZOOKEEPER_SERVER_ID: '2'
|
||||
ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
|
||||
ZOOKEEPER_CLIENT_PORT: '2181'
|
||||
ZOOKEEPER_PEER_PORT: '2888'
|
||||
ZOOKEEPER_LEADER_PORT: '3888'
|
||||
ZOOKEEPER_INIT_LIMIT: '10'
|
||||
ZOOKEEPER_SYNC_LIMIT: '5'
|
||||
ZOOKEEPER_MAX_CLIENT_CONNS: '0'
|
||||
zookeeper-3:
|
||||
image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-5.5.0}'
|
||||
restart: always
|
||||
environment:
|
||||
ZOOKEEPER_SERVER_ID: '3'
|
||||
ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
|
||||
ZOOKEEPER_CLIENT_PORT: '2181'
|
||||
ZOOKEEPER_PEER_PORT: '2888'
|
||||
ZOOKEEPER_LEADER_PORT: '3888'
|
||||
ZOOKEEPER_INIT_LIMIT: '10'
|
||||
ZOOKEEPER_SYNC_LIMIT: '5'
|
||||
ZOOKEEPER_MAX_CLIENT_CONNS: '0'
|
||||
kafka-1:
|
||||
image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}'
|
||||
restart: always
|
||||
environment:
|
||||
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
|
||||
KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091'
|
||||
KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091'
|
||||
KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
|
||||
KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
|
||||
KAFKA_BROKER_ID: '1'
|
||||
KAFKA_BROKER_RACK: '1'
|
||||
KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
|
||||
KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
|
||||
KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
|
||||
KAFKA_DELETE_TOPIC_ENABLE: 'true'
|
||||
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
|
||||
kafka-2:
|
||||
image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}'
|
||||
restart: always
|
||||
environment:
|
||||
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
|
||||
KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092'
|
||||
KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092'
|
||||
KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
|
||||
KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
|
||||
KAFKA_BROKER_ID: '2'
|
||||
KAFKA_BROKER_RACK: '2'
|
||||
KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
|
||||
KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
|
||||
KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
|
||||
KAFKA_DELETE_TOPIC_ENABLE: 'true'
|
||||
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
|
||||
kafka-3:
|
||||
image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}'
|
||||
restart: always
|
||||
environment:
|
||||
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
|
||||
KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093'
|
||||
KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093'
|
||||
KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
|
||||
KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
|
||||
KAFKA_BROKER_ID: '3'
|
||||
KAFKA_BROKER_RACK: '3'
|
||||
KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
|
||||
KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
|
||||
KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
|
||||
KAFKA_DELETE_TOPIC_ENABLE: 'true'
|
||||
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
|
||||
kafka-4:
|
||||
image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}'
|
||||
restart: always
|
||||
environment:
|
||||
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
|
||||
KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094'
|
||||
KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094'
|
||||
KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
|
||||
KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
|
||||
KAFKA_BROKER_ID: '4'
|
||||
KAFKA_BROKER_RACK: '4'
|
||||
KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
|
||||
KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
|
||||
KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
|
||||
KAFKA_DELETE_TOPIC_ENABLE: 'true'
|
||||
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
|
||||
kafka-5:
|
||||
image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}'
|
||||
restart: always
|
||||
environment:
|
||||
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
|
||||
KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095'
|
||||
KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095'
|
||||
KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
|
||||
KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
|
||||
KAFKA_BROKER_ID: '5'
|
||||
KAFKA_BROKER_RACK: '5'
|
||||
KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
|
||||
KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
|
||||
KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
|
||||
KAFKA_DELETE_TOPIC_ENABLE: 'true'
|
||||
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
|
||||
toxiproxy:
|
||||
image: 'shopify/toxiproxy:2.1.4'
|
||||
ports:
|
||||
# The tests themselves actually start the proxies on these ports
|
||||
- '29091:29091'
|
||||
- '29092:29092'
|
||||
- '29093:29093'
|
||||
- '29094:29094'
|
||||
- '29095:29095'
|
||||
# This is the toxiproxy API port
|
||||
- '8474:8474'
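
This compose file stands up a three-node ZooKeeper ensemble, five Kafka brokers (reachable from the host through the LISTENER_LOCAL ports 29091-29095), and a toxiproxy instance whose API listens on 8474. As a rough sketch of how a client reaches that cluster once the stack is up, a sarama client can dial the local listeners directly; the topic lookup below is only illustrative, and the protocol version is an assumption chosen to be compatible with the Confluent 5.5 images.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Local listener ports exposed by the docker-compose file above.
	brokers := []string{"localhost:29091", "localhost:29092", "localhost:29093"}

	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_3_0_0 // assumption: conservative version for the Confluent 5.5 brokers

	client, err := sarama.NewClient(brokers, cfg)
	if err != nil {
		log.Fatalf("failed to connect to local cluster: %v", err)
	}
	defer client.Close()

	topics, err := client.Topics()
	if err != nil {
		log.Fatalf("failed to list topics: %v", err)
	}
	log.Printf("topics: %v", topics)
}
```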
|
|
@ -12,6 +12,11 @@ type encoder interface {
|
|||
encode(pe packetEncoder) error
|
||||
}
|
||||
|
||||
type encoderWithHeader interface {
|
||||
encoder
|
||||
headerVersion() int16
|
||||
}
|
||||
|
||||
// encode takes an encoder and turns it into bytes while potentially recording metrics.
|
||||
func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
|
||||
if e == nil {
|
||||
|
|
|
@ -45,6 +45,10 @@ func (a *EndTxnRequest) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (r *EndTxnRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (a *EndTxnRequest) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
}
|
||||
|
|
|
@ -39,6 +39,10 @@ func (e *EndTxnResponse) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (r *EndTxnResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (e *EndTxnResponse) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
}
|
||||
|
|
|
@ -81,6 +81,44 @@ func (err ConfigurationError) Error() string {
|
|||
// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
|
||||
type KError int16
|
||||
|
||||
// MultiError is used to contain multi error.
|
||||
type MultiError struct {
|
||||
Errors *[]error
|
||||
}
|
||||
|
||||
func (mErr MultiError) Error() string {
|
||||
var errString = ""
|
||||
for _, err := range *mErr.Errors {
|
||||
errString += err.Error() + ","
|
||||
}
|
||||
return errString
|
||||
}
|
||||
|
||||
func (mErr MultiError) PrettyError() string {
|
||||
var errString = ""
|
||||
for _, err := range *mErr.Errors {
|
||||
errString += err.Error() + "\n"
|
||||
}
|
||||
return errString
|
||||
}
|
||||
|
||||
// ErrDeleteRecords is the type of error returned when fail to delete the required records
|
||||
type ErrDeleteRecords struct {
|
||||
MultiError
|
||||
}
|
||||
|
||||
func (err ErrDeleteRecords) Error() string {
|
||||
return "kafka server: failed to delete records " + err.MultiError.Error()
|
||||
}
|
||||
|
||||
type ErrReassignPartitions struct {
|
||||
MultiError
|
||||
}
|
||||
|
||||
func (err ErrReassignPartitions) Error() string {
|
||||
return fmt.Sprintf("failed to reassign partitions for topic: \n%s", err.MultiError.PrettyError())
|
||||
}
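
MultiError simply concatenates whatever errors it wraps, and ErrDeleteRecords / ErrReassignPartitions embed it so admin operations can surface several partition-level failures in a single value. A minimal sketch of how such a value formats, using made-up wrapped errors:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// Hypothetical partition-level failures, as an admin call might collect them.
	errs := []error{
		errors.New("partition 0: leader not available"),
		errors.New("partition 3: request timed out"),
	}

	err := sarama.ErrDeleteRecords{MultiError: sarama.MultiError{Errors: &errs}}

	// Error() joins the wrapped errors with commas after the fixed prefix.
	fmt.Println(err.Error())
	// kafka server: failed to delete records partition 0: leader not available,partition 3: request timed out,
}
```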
|
||||
|
||||
// Numeric error codes returned by the Kafka server.
|
||||
const (
|
||||
ErrNoError KError = 0
|
||||
|
@ -145,6 +183,28 @@ const (
|
|||
ErrSASLAuthenticationFailed KError = 58
|
||||
ErrUnknownProducerID KError = 59
|
||||
ErrReassignmentInProgress KError = 60
|
||||
ErrDelegationTokenAuthDisabled KError = 61
|
||||
ErrDelegationTokenNotFound KError = 62
|
||||
ErrDelegationTokenOwnerMismatch KError = 63
|
||||
ErrDelegationTokenRequestNotAllowed KError = 64
|
||||
ErrDelegationTokenAuthorizationFailed KError = 65
|
||||
ErrDelegationTokenExpired KError = 66
|
||||
ErrInvalidPrincipalType KError = 67
|
||||
ErrNonEmptyGroup KError = 68
|
||||
ErrGroupIDNotFound KError = 69
|
||||
ErrFetchSessionIDNotFound KError = 70
|
||||
ErrInvalidFetchSessionEpoch KError = 71
|
||||
ErrListenerNotFound KError = 72
|
||||
ErrTopicDeletionDisabled KError = 73
|
||||
ErrFencedLeaderEpoch KError = 74
|
||||
ErrUnknownLeaderEpoch KError = 75
|
||||
ErrUnsupportedCompressionType KError = 76
|
||||
ErrStaleBrokerEpoch KError = 77
|
||||
ErrOffsetNotAvailable KError = 78
|
||||
ErrMemberIdRequired KError = 79
|
||||
ErrPreferredLeaderNotAvailable KError = 80
|
||||
ErrGroupMaxSizeReached KError = 81
|
||||
ErrFencedInstancedId KError = 82
|
||||
)
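
Because KError is just an int16, callers can switch directly on the broker-supplied code. A minimal sketch of that pattern (the helper function is hypothetical):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

// handle shows how callers typically branch on the KError codes defined above.
func handle(kerr sarama.KError) {
	switch kerr {
	case sarama.ErrNoError:
		// success, nothing to do
	case sarama.ErrFencedLeaderEpoch, sarama.ErrUnknownLeaderEpoch:
		// stale leader epoch: refresh metadata and retry
	case sarama.ErrOffsetNotAvailable:
		// leader not caught up after an election: back off and retry
	default:
		log.Printf("kafka error %d: %v", int16(kerr), kerr)
	}
}

func main() {
	handle(sarama.ErrOffsetNotAvailable)
}
```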
|
||||
|
||||
func (err KError) Error() string {
|
||||
|
@ -275,6 +335,50 @@ func (err KError) Error() string {
|
|||
return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
|
||||
case ErrReassignmentInProgress:
|
||||
return "kafka server: A partition reassignment is in progress."
|
||||
case ErrDelegationTokenAuthDisabled:
|
||||
return "kafka server: Delegation Token feature is not enabled."
|
||||
case ErrDelegationTokenNotFound:
|
||||
return "kafka server: Delegation Token is not found on server."
|
||||
case ErrDelegationTokenOwnerMismatch:
|
||||
return "kafka server: Specified Principal is not valid Owner/Renewer."
|
||||
case ErrDelegationTokenRequestNotAllowed:
|
||||
return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels."
|
||||
case ErrDelegationTokenAuthorizationFailed:
|
||||
return "kafka server: Delegation Token authorization failed."
|
||||
case ErrDelegationTokenExpired:
|
||||
return "kafka server: Delegation Token is expired."
|
||||
case ErrInvalidPrincipalType:
|
||||
return "kafka server: Supplied principalType is not supported."
|
||||
case ErrNonEmptyGroup:
|
||||
return "kafka server: The group is not empty."
|
||||
case ErrGroupIDNotFound:
|
||||
return "kafka server: The group id does not exist."
|
||||
case ErrFetchSessionIDNotFound:
|
||||
return "kafka server: The fetch session ID was not found."
|
||||
case ErrInvalidFetchSessionEpoch:
|
||||
return "kafka server: The fetch session epoch is invalid."
|
||||
case ErrListenerNotFound:
|
||||
return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed."
|
||||
case ErrTopicDeletionDisabled:
|
||||
return "kafka server: Topic deletion is disabled."
|
||||
case ErrFencedLeaderEpoch:
|
||||
return "kafka server: The leader epoch in the request is older than the epoch on the broker."
|
||||
case ErrUnknownLeaderEpoch:
|
||||
return "kafka server: The leader epoch in the request is newer than the epoch on the broker."
|
||||
case ErrUnsupportedCompressionType:
|
||||
return "kafka server: The requesting client does not support the compression type of given partition."
|
||||
case ErrStaleBrokerEpoch:
|
||||
return "kafka server: Broker epoch has changed"
|
||||
case ErrOffsetNotAvailable:
|
||||
return "kafka server: The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing"
|
||||
case ErrMemberIdRequired:
|
||||
return "kafka server: The group member needs to have a valid member id before actually entering a consumer group"
|
||||
case ErrPreferredLeaderNotAvailable:
|
||||
return "kafka server: The preferred leader was not available"
|
||||
case ErrGroupMaxSizeReached:
|
||||
return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members."
|
||||
case ErrFencedInstancedId:
|
||||
return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id."
|
||||
}
|
||||
|
||||
return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
|
||||
|
|
|
@ -1,20 +1,41 @@
|
|||
package sarama
|
||||
|
||||
type fetchRequestBlock struct {
|
||||
fetchOffset int64
|
||||
maxBytes int32
|
||||
Version int16
|
||||
currentLeaderEpoch int32
|
||||
fetchOffset int64
|
||||
logStartOffset int64
|
||||
maxBytes int32
|
||||
}
|
||||
|
||||
func (b *fetchRequestBlock) encode(pe packetEncoder) error {
|
||||
func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error {
|
||||
b.Version = version
|
||||
if b.Version >= 9 {
|
||||
pe.putInt32(b.currentLeaderEpoch)
|
||||
}
|
||||
pe.putInt64(b.fetchOffset)
|
||||
if b.Version >= 5 {
|
||||
pe.putInt64(b.logStartOffset)
|
||||
}
|
||||
pe.putInt32(b.maxBytes)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
|
||||
func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) {
|
||||
b.Version = version
|
||||
if b.Version >= 9 {
|
||||
if b.currentLeaderEpoch, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if b.fetchOffset, err = pd.getInt64(); err != nil {
|
||||
return err
|
||||
}
|
||||
if b.Version >= 5 {
|
||||
if b.logStartOffset, err = pd.getInt64(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if b.maxBytes, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -25,19 +46,23 @@ func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
|
|||
// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
|
||||
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
|
||||
type FetchRequest struct {
|
||||
MaxWaitTime int32
|
||||
MinBytes int32
|
||||
MaxBytes int32
|
||||
Version int16
|
||||
Isolation IsolationLevel
|
||||
blocks map[string]map[int32]*fetchRequestBlock
|
||||
MaxWaitTime int32
|
||||
MinBytes int32
|
||||
MaxBytes int32
|
||||
Version int16
|
||||
Isolation IsolationLevel
|
||||
SessionID int32
|
||||
SessionEpoch int32
|
||||
blocks map[string]map[int32]*fetchRequestBlock
|
||||
forgotten map[string][]int32
|
||||
RackID string
|
||||
}
|
||||
|
||||
type IsolationLevel int8
|
||||
|
||||
const (
|
||||
ReadUncommitted IsolationLevel = 0
|
||||
ReadCommitted IsolationLevel = 1
|
||||
ReadUncommitted IsolationLevel = iota
|
||||
ReadCommitted
|
||||
)
|
||||
|
||||
func (r *FetchRequest) encode(pe packetEncoder) (err error) {
|
||||
|
@ -50,6 +75,10 @@ func (r *FetchRequest) encode(pe packetEncoder) (err error) {
|
|||
if r.Version >= 4 {
|
||||
pe.putInt8(int8(r.Isolation))
|
||||
}
|
||||
if r.Version >= 7 {
|
||||
pe.putInt32(r.SessionID)
|
||||
pe.putInt32(r.SessionEpoch)
|
||||
}
|
||||
err = pe.putArrayLength(len(r.blocks))
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -65,17 +94,44 @@ func (r *FetchRequest) encode(pe packetEncoder) (err error) {
|
|||
}
|
||||
for partition, block := range blocks {
|
||||
pe.putInt32(partition)
|
||||
err = block.encode(pe)
|
||||
err = block.encode(pe, r.Version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if r.Version >= 7 {
|
||||
err = pe.putArrayLength(len(r.forgotten))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for topic, partitions := range r.forgotten {
|
||||
err = pe.putString(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = pe.putArrayLength(len(partitions))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, partition := range partitions {
|
||||
pe.putInt32(partition)
|
||||
}
|
||||
}
|
||||
}
|
||||
if r.Version >= 11 {
|
||||
err = pe.putString(r.RackID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Version = version
|
||||
|
||||
if _, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -97,6 +153,16 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
|
|||
}
|
||||
r.Isolation = IsolationLevel(isolation)
|
||||
}
|
||||
if r.Version >= 7 {
|
||||
r.SessionID, err = pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.SessionEpoch, err = pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
topicCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -121,12 +187,47 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
|
|||
return err
|
||||
}
|
||||
fetchBlock := &fetchRequestBlock{}
|
||||
if err = fetchBlock.decode(pd); err != nil {
|
||||
if err = fetchBlock.decode(pd, r.Version); err != nil {
|
||||
return err
|
||||
}
|
||||
r.blocks[topic][partition] = fetchBlock
|
||||
}
|
||||
}
|
||||
|
||||
if r.Version >= 7 {
|
||||
forgottenCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.forgotten = make(map[string][]int32)
|
||||
for i := 0; i < forgottenCount; i++ {
|
||||
topic, err := pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
partitionCount, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.forgotten[topic] = make([]int32, partitionCount)
|
||||
|
||||
for j := 0; j < partitionCount; j++ {
|
||||
partition, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.forgotten[topic][j] = partition
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if r.Version >= 11 {
|
||||
r.RackID, err = pd.getString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -138,18 +239,34 @@ func (r *FetchRequest) version() int16 {
|
|||
return r.Version
|
||||
}
|
||||
|
||||
func (r *FetchRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *FetchRequest) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 0:
|
||||
return MinVersion
|
||||
case 1:
|
||||
return V0_9_0_0
|
||||
case 2:
|
||||
return V0_10_0_0
|
||||
case 3:
|
||||
return V0_10_1_0
|
||||
case 4:
|
||||
case 4, 5:
|
||||
return V0_11_0_0
|
||||
case 6:
|
||||
return V1_0_0_0
|
||||
case 7:
|
||||
return V1_1_0_0
|
||||
case 8:
|
||||
return V2_0_0_0
|
||||
case 9, 10:
|
||||
return V2_1_0_0
|
||||
case 11:
|
||||
return V2_3_0_0
|
||||
default:
|
||||
return MinVersion
|
||||
return MaxVersion
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -158,13 +275,21 @@ func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int
|
|||
r.blocks = make(map[string]map[int32]*fetchRequestBlock)
|
||||
}
|
||||
|
||||
if r.Version >= 7 && r.forgotten == nil {
|
||||
r.forgotten = make(map[string][]int32)
|
||||
}
|
||||
|
||||
if r.blocks[topic] == nil {
|
||||
r.blocks[topic] = make(map[int32]*fetchRequestBlock)
|
||||
}
|
||||
|
||||
tmp := new(fetchRequestBlock)
|
||||
tmp.Version = r.Version
|
||||
tmp.maxBytes = maxBytes
|
||||
tmp.fetchOffset = fetchOffset
|
||||
if r.Version >= 9 {
|
||||
tmp.currentLeaderEpoch = int32(-1)
|
||||
}
|
||||
|
||||
r.blocks[topic][partitionID] = tmp
|
||||
}
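
AddBlock registers a single (topic, partition) pair to fetch and, on v9+, defaults currentLeaderEpoch to -1 so the broker skips leader-epoch fencing. A minimal sketch of assembling a versioned FetchRequest by hand (the consumer normally does this internally; topic, offsets, and rack are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.FetchRequest{
		Version:     11, // v11 needs a 2.3.0+ broker and enables RackID below
		MaxWaitTime: 500,
		MinBytes:    1,
		MaxBytes:    1 << 20,
		Isolation:   sarama.ReadCommitted, // skip records from aborted transactions
		RackID:      "rack-1",             // hypothetical rack, used for follower fetching
	}
	req.AddBlock("my-topic", 0, 1234, 1<<16)

	fmt.Printf("prepared fetch request, version %d\n", req.Version)
}
```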
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
|
@ -29,13 +30,15 @@ func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
|
|||
}
|
||||
|
||||
type FetchResponseBlock struct {
|
||||
Err KError
|
||||
HighWaterMarkOffset int64
|
||||
LastStableOffset int64
|
||||
AbortedTransactions []*AbortedTransaction
|
||||
Records *Records // deprecated: use FetchResponseBlock.Records
|
||||
RecordsSet []*Records
|
||||
Partial bool
|
||||
Err KError
|
||||
HighWaterMarkOffset int64
|
||||
LastStableOffset int64
|
||||
LogStartOffset int64
|
||||
AbortedTransactions []*AbortedTransaction
|
||||
PreferredReadReplica int32
|
||||
Records *Records // deprecated: use FetchResponseBlock.RecordsSet
|
||||
RecordsSet []*Records
|
||||
Partial bool
|
||||
}
|
||||
|
||||
func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
|
||||
|
@ -56,6 +59,13 @@ func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error)
|
|||
return err
|
||||
}
|
||||
|
||||
if version >= 5 {
|
||||
b.LogStartOffset, err = pd.getInt64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
numTransact, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -74,6 +84,13 @@ func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error)
|
|||
}
|
||||
}
|
||||
|
||||
if version >= 11 {
|
||||
b.PreferredReadReplica, err = pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
recordsSize, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -165,6 +182,10 @@ func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error)
|
|||
if version >= 4 {
|
||||
pe.putInt64(b.LastStableOffset)
|
||||
|
||||
if version >= 5 {
|
||||
pe.putInt64(b.LogStartOffset)
|
||||
}
|
||||
|
||||
if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -175,6 +196,10 @@ func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error)
|
|||
}
|
||||
}
|
||||
|
||||
if version >= 11 {
|
||||
pe.putInt32(b.PreferredReadReplica)
|
||||
}
|
||||
|
||||
pe.push(&lengthField{})
|
||||
for _, records := range b.RecordsSet {
|
||||
err = records.encode(pe)
|
||||
|
@ -185,10 +210,25 @@ func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error)
|
|||
return pe.pop()
|
||||
}
|
||||
|
||||
func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction {
|
||||
// I can't find any doc that guarantees the field `fetchResponse.AbortedTransactions` is ordered
|
||||
// plus the Java implementation uses a PriorityQueue based on `FirstOffset`. I guess we have to order it ourselves
|
||||
at := b.AbortedTransactions
|
||||
sort.Slice(
|
||||
at,
|
||||
func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset },
|
||||
)
|
||||
return at
|
||||
}
|
||||
|
||||
type FetchResponse struct {
|
||||
Blocks map[string]map[int32]*FetchResponseBlock
|
||||
ThrottleTime time.Duration
|
||||
Version int16 // v1 requires 0.9+, v2 requires 0.10+
|
||||
Blocks map[string]map[int32]*FetchResponseBlock
|
||||
ThrottleTime time.Duration
|
||||
ErrorCode int16
|
||||
SessionID int32
|
||||
Version int16
|
||||
LogAppendTime bool
|
||||
Timestamp time.Time
|
||||
}
|
||||
|
||||
func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
|
@ -202,6 +242,17 @@ func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
|
|||
r.ThrottleTime = time.Duration(throttle) * time.Millisecond
|
||||
}
|
||||
|
||||
if r.Version >= 7 {
|
||||
r.ErrorCode, err = pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.SessionID, err = pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
numTopics, err := pd.getArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -244,6 +295,11 @@ func (r *FetchResponse) encode(pe packetEncoder) (err error) {
|
|||
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
|
||||
}
|
||||
|
||||
if r.Version >= 7 {
|
||||
pe.putInt16(r.ErrorCode)
|
||||
pe.putInt32(r.SessionID)
|
||||
}
|
||||
|
||||
err = pe.putArrayLength(len(r.Blocks))
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -267,7 +323,6 @@ func (r *FetchResponse) encode(pe packetEncoder) (err error) {
|
|||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -280,18 +335,34 @@ func (r *FetchResponse) version() int16 {
|
|||
return r.Version
|
||||
}
|
||||
|
||||
func (r *FetchResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *FetchResponse) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 0:
|
||||
return MinVersion
|
||||
case 1:
|
||||
return V0_9_0_0
|
||||
case 2:
|
||||
return V0_10_0_0
|
||||
case 3:
|
||||
return V0_10_1_0
|
||||
case 4:
|
||||
case 4, 5:
|
||||
return V0_11_0_0
|
||||
case 6:
|
||||
return V1_0_0_0
|
||||
case 7:
|
||||
return V1_1_0_0
|
||||
case 8:
|
||||
return V2_0_0_0
|
||||
case 9, 10:
|
||||
return V2_1_0_0
|
||||
case 11:
|
||||
return V2_3_0_0
|
||||
default:
|
||||
return MinVersion
|
||||
return MaxVersion
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -355,10 +426,13 @@ func encodeKV(key, value Encoder) ([]byte, []byte) {
|
|||
return kb, vb
|
||||
}
|
||||
|
||||
func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
|
||||
func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) {
|
||||
frb := r.getOrCreateBlock(topic, partition)
|
||||
kb, vb := encodeKV(key, value)
|
||||
msg := &Message{Key: kb, Value: vb}
|
||||
if r.LogAppendTime {
|
||||
timestamp = r.Timestamp
|
||||
}
|
||||
msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version}
|
||||
msgBlock := &MessageBlock{Msg: msg, Offset: offset}
|
||||
if len(frb.RecordsSet) == 0 {
|
||||
records := newLegacyRecords(&MessageSet{})
|
||||
|
@ -368,18 +442,94 @@ func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Enc
|
|||
set.Messages = append(set.Messages, msgBlock)
|
||||
}
|
||||
|
||||
func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
|
||||
func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) {
|
||||
frb := r.getOrCreateBlock(topic, partition)
|
||||
kb, vb := encodeKV(key, value)
|
||||
rec := &Record{Key: kb, Value: vb, OffsetDelta: offset}
|
||||
if len(frb.RecordsSet) == 0 {
|
||||
records := newDefaultRecords(&RecordBatch{Version: 2})
|
||||
records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
|
||||
frb.RecordsSet = []*Records{&records}
|
||||
}
|
||||
batch := frb.RecordsSet[0].RecordBatch
|
||||
rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
|
||||
batch.addRecord(rec)
|
||||
}
|
||||
|
||||
// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp
|
||||
// But instead of appending 1 record to an existing batch, it appends a new batch containing 1 record to the fetchResponse
|
||||
// Since transactions are handled at the batch level (the whole batch is either committed or aborted), use this to test transactions
|
||||
func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) {
|
||||
frb := r.getOrCreateBlock(topic, partition)
|
||||
kb, vb := encodeKV(key, value)
|
||||
|
||||
records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
|
||||
batch := &RecordBatch{
|
||||
Version: 2,
|
||||
LogAppendTime: r.LogAppendTime,
|
||||
FirstTimestamp: timestamp,
|
||||
MaxTimestamp: r.Timestamp,
|
||||
FirstOffset: offset,
|
||||
LastOffsetDelta: 0,
|
||||
ProducerID: producerID,
|
||||
IsTransactional: isTransactional,
|
||||
}
|
||||
rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
|
||||
batch.addRecord(rec)
|
||||
records.RecordBatch = batch
|
||||
|
||||
frb.RecordsSet = append(frb.RecordsSet, &records)
|
||||
}
|
||||
|
||||
func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) {
|
||||
frb := r.getOrCreateBlock(topic, partition)
|
||||
|
||||
// batch
|
||||
batch := &RecordBatch{
|
||||
Version: 2,
|
||||
LogAppendTime: r.LogAppendTime,
|
||||
FirstTimestamp: timestamp,
|
||||
MaxTimestamp: r.Timestamp,
|
||||
FirstOffset: offset,
|
||||
LastOffsetDelta: 0,
|
||||
ProducerID: producerID,
|
||||
IsTransactional: true,
|
||||
Control: true,
|
||||
}
|
||||
|
||||
// records
|
||||
records := newDefaultRecords(nil)
|
||||
records.RecordBatch = batch
|
||||
|
||||
// record
|
||||
crAbort := ControlRecord{
|
||||
Version: 0,
|
||||
Type: recordType,
|
||||
}
|
||||
crKey := &realEncoder{raw: make([]byte, 4)}
|
||||
crValue := &realEncoder{raw: make([]byte, 6)}
|
||||
crAbort.encode(crKey, crValue)
|
||||
rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
|
||||
batch.addRecord(rec)
|
||||
|
||||
frb.RecordsSet = append(frb.RecordsSet, &records)
|
||||
}
|
||||
|
||||
func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
|
||||
r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0)
|
||||
}
|
||||
|
||||
func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
|
||||
r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{})
|
||||
}
|
||||
|
||||
func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) {
|
||||
r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{})
|
||||
}
|
||||
|
||||
func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) {
|
||||
// define controlRecord key and value
|
||||
r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{})
|
||||
}
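
These Add* wrappers exist mainly for tests: they let a mocked broker build up the RecordsSet of a canned FetchResponse without dealing with timestamps. A minimal sketch of that test-side usage (topic and payloads are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// Build a canned response the way a mocked broker in a test might.
	resp := &sarama.FetchResponse{Version: 4}
	resp.AddRecord("my-topic", 0, nil, sarama.StringEncoder("hello"), 42)
	resp.AddRecord("my-topic", 0, sarama.StringEncoder("key"), sarama.StringEncoder("world"), 43)

	block := resp.GetBlock("my-topic", 0)
	fmt.Printf("canned block holds %d record set(s)\n", len(block.RecordsSet))
}
```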
|
||||
|
||||
func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
|
||||
frb := r.getOrCreateBlock(topic, partition)
|
||||
if len(frb.RecordsSet) == 0 {
|
||||
|
|
|
@ -3,8 +3,8 @@ package sarama
|
|||
type CoordinatorType int8
|
||||
|
||||
const (
|
||||
CoordinatorGroup CoordinatorType = 0
|
||||
CoordinatorTransaction CoordinatorType = 1
|
||||
CoordinatorGroup CoordinatorType = iota
|
||||
CoordinatorTransaction
|
||||
)
|
||||
|
||||
type FindCoordinatorRequest struct {
|
||||
|
@ -51,6 +51,10 @@ func (f *FindCoordinatorRequest) version() int16 {
|
|||
return f.Version
|
||||
}
|
||||
|
||||
func (r *FindCoordinatorRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
|
||||
switch f.Version {
|
||||
case 1:
|
||||
|
|
|
@ -82,6 +82,10 @@ func (f *FindCoordinatorResponse) version() int16 {
|
|||
return f.Version
|
||||
}
|
||||
|
||||
func (r *FindCoordinatorResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
|
||||
switch f.Version {
|
||||
case 1:
|
||||
|
|
|
@ -0,0 +1,35 @@
|
|||
module github.com/Shopify/sarama
|
||||
|
||||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/eapache/go-resiliency v1.2.0
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21
|
||||
github.com/eapache/queue v1.1.0
|
||||
github.com/fortytw2/leaktest v1.3.0
|
||||
github.com/frankban/quicktest v1.10.2 // indirect
|
||||
github.com/golang/snappy v0.0.1 // indirect
|
||||
github.com/hashicorp/go-uuid v1.0.2 // indirect
|
||||
github.com/jcmturner/gofork v1.0.0
|
||||
github.com/klauspost/compress v1.11.0
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
|
||||
github.com/pierrec/lz4 v2.5.2+incompatible
|
||||
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
|
||||
github.com/stretchr/testify v1.6.1
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c
|
||||
github.com/xdg/stringprep v1.0.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a // indirect
|
||||
golang.org/x/net v0.0.0-20200904194848-62affa334b73
|
||||
golang.org/x/text v0.3.3 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
|
||||
gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect
|
||||
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect
|
||||
gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
|
||||
gopkg.in/jcmturner/gokrb5.v7 v7.5.0
|
||||
gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect
|
||||
)
|
|
@ -0,0 +1,87 @@
|
|||
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
|
||||
github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
|
||||
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
|
||||
github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk=
|
||||
github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
|
||||
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
|
||||
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
|
||||
github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg=
|
||||
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI=
|
||||
github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
|
||||
github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM=
|
||||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA=
|
||||
golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U=
|
||||
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
|
||||
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
|
||||
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
|
||||
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
|
||||
gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=
|
||||
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
|
||||
gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg=
|
||||
gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
|
||||
gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
|
||||
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
@ -0,0 +1,258 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/jcmturner/gofork/encoding/asn1"
|
||||
"gopkg.in/jcmturner/gokrb5.v7/asn1tools"
|
||||
"gopkg.in/jcmturner/gokrb5.v7/gssapi"
|
||||
"gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
|
||||
"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
|
||||
"gopkg.in/jcmturner/gokrb5.v7/messages"
|
||||
"gopkg.in/jcmturner/gokrb5.v7/types"
|
||||
)
|
||||
|
||||
const (
|
||||
TOK_ID_KRB_AP_REQ = 256
|
||||
GSS_API_GENERIC_TAG = 0x60
|
||||
KRB5_USER_AUTH = 1
|
||||
KRB5_KEYTAB_AUTH = 2
|
||||
GSS_API_INITIAL = 1
|
||||
GSS_API_VERIFY = 2
|
||||
GSS_API_FINISH = 3
|
||||
)
|
||||
|
||||
type GSSAPIConfig struct {
|
||||
AuthType int
|
||||
KeyTabPath string
|
||||
KerberosConfigPath string
|
||||
ServiceName string
|
||||
Username string
|
||||
Password string
|
||||
Realm string
|
||||
DisablePAFXFAST bool
|
||||
}
|
||||
|
||||
type GSSAPIKerberosAuth struct {
|
||||
Config *GSSAPIConfig
|
||||
ticket messages.Ticket
|
||||
encKey types.EncryptionKey
|
||||
NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error)
|
||||
step int
|
||||
}
|
||||
|
||||
type KerberosClient interface {
|
||||
Login() error
|
||||
GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error)
|
||||
Domain() string
|
||||
CName() types.PrincipalName
|
||||
Destroy()
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
* Appends the length in big endian before the payload, and sends it to Kafka
|
||||
*
|
||||
*/
|
||||
|
||||
func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) {
|
||||
length := len(payload)
|
||||
finalPackage := make([]byte, length+4) //4 byte length header + payload
|
||||
copy(finalPackage[4:], payload)
|
||||
binary.BigEndian.PutUint32(finalPackage, uint32(length))
|
||||
bytes, err := broker.conn.Write(finalPackage)
|
||||
if err != nil {
|
||||
return bytes, err
|
||||
}
|
||||
return bytes, nil
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
* Read length (4 bytes) and then read the payload
|
||||
*
|
||||
*/
|
||||
|
||||
func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) {
|
||||
bytesRead := 0
|
||||
lengthInBytes := make([]byte, 4)
|
||||
bytes, err := io.ReadFull(broker.conn, lengthInBytes)
|
||||
if err != nil {
|
||||
return nil, bytesRead, err
|
||||
}
|
||||
bytesRead += bytes
|
||||
payloadLength := binary.BigEndian.Uint32(lengthInBytes)
|
||||
payloadBytes := make([]byte, payloadLength) // buffer for read..
|
||||
bytes, err = io.ReadFull(broker.conn, payloadBytes) // read bytes
|
||||
if err != nil {
|
||||
return payloadBytes, bytesRead, err
|
||||
}
|
||||
bytesRead += bytes
|
||||
return payloadBytes, bytesRead, nil
|
||||
}
|
||||
|
||||
func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte {
|
||||
a := make([]byte, 24)
|
||||
flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf}
|
||||
binary.LittleEndian.PutUint32(a[:4], 16)
|
||||
for _, i := range flags {
|
||||
f := binary.LittleEndian.Uint32(a[20:24])
|
||||
f |= uint32(i)
|
||||
binary.LittleEndian.PutUint32(a[20:24], f)
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
* Construct Kerberos AP_REQ package, conforming to RFC-4120
|
||||
* https://tools.ietf.org/html/rfc4120#page-84
|
||||
*
|
||||
*/
|
||||
func (krbAuth *GSSAPIKerberosAuth) createKrb5Token(
|
||||
domain string, cname types.PrincipalName,
|
||||
ticket messages.Ticket,
|
||||
sessionKey types.EncryptionKey) ([]byte, error) {
|
||||
auth, err := types.NewAuthenticator(domain, cname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
auth.Cksum = types.Checksum{
|
||||
CksumType: chksumtype.GSSAPI,
|
||||
Checksum: krbAuth.newAuthenticatorChecksum(),
|
||||
}
|
||||
APReq, err := messages.NewAPReq(
|
||||
ticket,
|
||||
sessionKey,
|
||||
auth,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
aprBytes := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ)
|
||||
tb, err := APReq.Marshal()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
aprBytes = append(aprBytes, tb...)
|
||||
return aprBytes, nil
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
* Append the GSS-API header to the payload, conforming to RFC-2743
|
||||
* Section 3.1, Mechanism-Independent Token Format
|
||||
*
|
||||
* https://tools.ietf.org/html/rfc2743#page-81
|
||||
*
|
||||
* GSSAPIHeader + <specific mechanism payload>
|
||||
*
|
||||
*/
|
||||
func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) {
|
||||
oidBytes, err := asn1.Marshal(gssapi.OID(gssapi.OIDKRB5))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload))
|
||||
GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...)
|
||||
GSSHeader = append(GSSHeader, oidBytes...)
|
||||
GSSPackage := append(GSSHeader, payload...)
|
||||
return GSSPackage, nil
|
||||
}
|
||||
|
||||
func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient KerberosClient) ([]byte, error) {
|
||||
switch krbAuth.step {
|
||||
case GSS_API_INITIAL:
|
||||
aprBytes, err := krbAuth.createKrb5Token(
|
||||
kerberosClient.Domain(),
|
||||
kerberosClient.CName(),
|
||||
krbAuth.ticket,
|
||||
krbAuth.encKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
krbAuth.step = GSS_API_VERIFY
|
||||
return krbAuth.appendGSSAPIHeader(aprBytes)
|
||||
case GSS_API_VERIFY:
|
||||
wrapTokenReq := gssapi.WrapToken{}
|
||||
if err := wrapTokenReq.Unmarshal(bytes, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Validate response.
|
||||
isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL)
|
||||
if !isValid {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
krbAuth.step = GSS_API_FINISH
|
||||
return wrapTokenResponse.Marshal()
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
/* This does the handshake for authorization */
|
||||
func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error {
|
||||
kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
|
||||
if err != nil {
|
||||
Logger.Printf("Kerberos client error: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
err = kerberosClient.Login()
|
||||
if err != nil {
|
||||
Logger.Printf("Kerberos client error: %s", err)
|
||||
return err
|
||||
}
|
||||
// Construct SPN using serviceName and host
|
||||
// SPN format: <SERVICE>/<FQDN>
|
||||
|
||||
host := strings.SplitN(broker.addr, ":", 2)[0] // Strip port part
|
||||
spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host)
|
||||
|
||||
ticket, encKey, err := kerberosClient.GetServiceTicket(spn)
|
||||
|
||||
if err != nil {
|
||||
Logger.Printf("Error getting Kerberos service ticket : %s", err)
|
||||
return err
|
||||
}
|
||||
krbAuth.ticket = ticket
|
||||
krbAuth.encKey = encKey
|
||||
krbAuth.step = GSS_API_INITIAL
|
||||
var receivedBytes []byte = nil
|
||||
defer kerberosClient.Destroy()
|
||||
for {
|
||||
packBytes, err := krbAuth.initSecContext(receivedBytes, kerberosClient)
|
||||
if err != nil {
|
||||
Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
|
||||
return err
|
||||
}
|
||||
requestTime := time.Now()
|
||||
bytesWritten, err := krbAuth.writePackage(broker, packBytes)
|
||||
if err != nil {
|
||||
Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
|
||||
return err
|
||||
}
|
||||
broker.updateOutgoingCommunicationMetrics(bytesWritten)
|
||||
if krbAuth.step == GSS_API_VERIFY {
|
||||
bytesRead := 0
|
||||
receivedBytes, bytesRead, err = krbAuth.readPackage(broker)
|
||||
requestLatency := time.Since(requestTime)
|
||||
broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
|
||||
if err != nil {
|
||||
Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
|
||||
return err
|
||||
}
|
||||
} else if krbAuth.step == GSS_API_FINISH {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
|
@ -42,6 +42,10 @@ func (r *HeartbeatRequest) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (r *HeartbeatRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
||||
|
|
|
@ -27,6 +27,10 @@ func (r *HeartbeatResponse) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (r *HeartbeatResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
||||
|
|
|
@ -38,6 +38,10 @@ func (i *InitProducerIDRequest) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (i *InitProducerIDRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
}
|
||||
|
|
|
@ -50,6 +50,10 @@ func (i *InitProducerIDResponse) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (i *InitProducerIDResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
|
||||
return V0_11_0_0
|
||||
}
|
||||
|
|
|
@ -0,0 +1,43 @@
|
|||
package sarama
|
||||
|
||||
// ProducerInterceptor allows you to intercept (and possibly mutate) the records
|
||||
// received by the producer before they are published to the Kafka cluster.
|
||||
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
|
||||
type ProducerInterceptor interface {
|
||||
|
||||
// OnSend is called when the producer message is intercepted. Please avoid
|
||||
// modifying the message until it's safe to do so, as this is _not_ a copy
|
||||
// of the message.
|
||||
OnSend(*ProducerMessage)
|
||||
}
|
||||
|
||||
// ConsumerInterceptor allows you to intercept (and possibly mutate) the records
|
||||
// received by the consumer before they are sent to the messages channel.
|
||||
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
|
||||
type ConsumerInterceptor interface {
|
||||
|
||||
// OnConsume is called when the consumed message is intercepted. Please
|
||||
// avoid modifying the message until it's safe to do so, as this is _not_ a
|
||||
// copy of the message.
|
||||
OnConsume(*ConsumerMessage)
|
||||
}
|
||||
|
||||
func (msg *ProducerMessage) safelyApplyInterceptor(interceptor ProducerInterceptor) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
Logger.Printf("Error when calling producer interceptor: %s, %w\n", interceptor, r)
|
||||
}
|
||||
}()
|
||||
|
||||
interceptor.OnSend(msg)
|
||||
}
|
||||
|
||||
func (msg *ConsumerMessage) safelyApplyInterceptor(interceptor ConsumerInterceptor) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
Logger.Printf("Error when calling consumer interceptor: %s, %w\n", interceptor, r)
|
||||
}
|
||||
}()
|
||||
|
||||
interceptor.OnConsume(msg)
|
||||
}
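
Interceptors are invoked through the safelyApply* wrappers above, so a panicking interceptor is logged instead of crashing the producer or consumer. A minimal sketch of a producer-side interceptor that stamps a header on every outgoing message, assuming Config.Producer.Interceptors is the registration point:

```go
package main

import "github.com/Shopify/sarama"

// headerInterceptor stamps every outgoing message with a static header.
type headerInterceptor struct{}

func (headerInterceptor) OnSend(msg *sarama.ProducerMessage) {
	msg.Headers = append(msg.Headers, sarama.RecordHeader{
		Key:   []byte("origin"),
		Value: []byte("example-service"), // hypothetical service name
	})
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Interceptors = []sarama.ProducerInterceptor{headerInterceptor{}}
	// cfg can now be passed to sarama.NewSyncProducer / NewAsyncProducer as usual.
	_ = cfg
}
```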
|
|
@ -134,6 +134,10 @@ func (r *JoinGroupRequest) version() int16 {
|
|||
return r.Version
|
||||
}
|
||||
|
||||
func (r *JoinGroupRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 2:
|
||||
|
|
|
@ -123,6 +123,10 @@ func (r *JoinGroupResponse) version() int16 {
|
|||
return r.Version
|
||||
}
|
||||
|
||||
func (r *JoinGroupResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 2:
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
krb5client "gopkg.in/jcmturner/gokrb5.v7/client"
|
||||
krb5config "gopkg.in/jcmturner/gokrb5.v7/config"
|
||||
"gopkg.in/jcmturner/gokrb5.v7/keytab"
|
||||
"gopkg.in/jcmturner/gokrb5.v7/types"
|
||||
)
|
||||
|
||||
type KerberosGoKrb5Client struct {
|
||||
krb5client.Client
|
||||
}
|
||||
|
||||
func (c *KerberosGoKrb5Client) Domain() string {
|
||||
return c.Credentials.Domain()
|
||||
}
|
||||
|
||||
func (c *KerberosGoKrb5Client) CName() types.PrincipalName {
|
||||
return c.Credentials.CName()
|
||||
}
|
||||
|
||||
// NewKerberosClient creates kerberos client used to obtain TGT and TGS tokens.
|
||||
// It uses a pure Go Kerberos 5 implementation (RFC 4121 and RFC 4120),
|
||||
// backed by the gokrb5 library, a pure Go Kerberos client with some GSS-API capabilities.
|
||||
func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) {
|
||||
cfg, err := krb5config.Load(config.KerberosConfigPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return createClient(config, cfg)
|
||||
}
|
||||
|
||||
func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) {
|
||||
var client *krb5client.Client
|
||||
if config.AuthType == KRB5_KEYTAB_AUTH {
|
||||
kt, err := keytab.Load(config.KeyTabPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client = krb5client.NewClientWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
|
||||
} else {
|
||||
client = krb5client.NewClientWithPassword(config.Username,
|
||||
config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
|
||||
}
|
||||
return &KerberosGoKrb5Client{*client}, nil
|
||||
}
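
NewKerberosClient is normally not called directly; it is wired in through the GSSAPI section of the SASL configuration when the broker performs the handshake. A minimal keytab-based sketch; the paths, realm, principal, and service name are hypothetical:

```go
package main

import "github.com/Shopify/sarama"

func main() {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI
	cfg.Net.SASL.GSSAPI = sarama.GSSAPIConfig{
		AuthType:           sarama.KRB5_KEYTAB_AUTH,
		KeyTabPath:         "/etc/security/kafka.keytab", // hypothetical path
		KerberosConfigPath: "/etc/krb5.conf",             // hypothetical path
		ServiceName:        "kafka",
		Username:           "kafka-client", // hypothetical principal
		Realm:              "EXAMPLE.COM",  // hypothetical realm
	}
	// During the SASL handshake the broker code calls NewKerberosClient (above) with this config.
}
```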
|
|
@ -35,6 +35,10 @@ func (r *LeaveGroupRequest) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (r *LeaveGroupRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
||||
|
|
|
@ -27,6 +27,10 @@ func (r *LeaveGroupResponse) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (r *LeaveGroupResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
||||
|
|
|
@ -1,6 +1,9 @@
|
|||
package sarama
|
||||
|
||||
import "encoding/binary"
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
|
||||
type lengthField struct {
|
||||
|
@ -8,6 +11,20 @@ type lengthField struct {
|
|||
length int32
|
||||
}
|
||||
|
||||
var lengthFieldPool = sync.Pool{}
|
||||
|
||||
func acquireLengthField() *lengthField {
|
||||
val := lengthFieldPool.Get()
|
||||
if val != nil {
|
||||
return val.(*lengthField)
|
||||
}
|
||||
return &lengthField{}
|
||||
}
|
||||
|
||||
func releaseLengthField(m *lengthField) {
|
||||
lengthFieldPool.Put(m)
|
||||
}
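
The two helpers above are the standard sync.Pool idiom: Get may return nil, in which case a fresh value is allocated, and Put hands the value back for reuse so the hot encode path avoids an allocation per length field. The same pattern, shown self-contained with a hypothetical pooled type:

```go
package main

import (
	"fmt"
	"sync"
)

// buffer is a hypothetical pooled type, standing in for lengthField above.
type buffer struct {
	data []byte
}

var bufferPool = sync.Pool{}

// acquireBuffer mirrors acquireLengthField: reuse a pooled value if one exists,
// otherwise allocate a fresh one.
func acquireBuffer() *buffer {
	if v := bufferPool.Get(); v != nil {
		return v.(*buffer)
	}
	return &buffer{data: make([]byte, 0, 1024)}
}

// releaseBuffer mirrors releaseLengthField: reset and hand the value back for reuse.
func releaseBuffer(b *buffer) {
	b.data = b.data[:0]
	bufferPool.Put(b)
}

func main() {
	b := acquireBuffer()
	b.data = append(b.data, 'x')
	releaseBuffer(b)
	fmt.Println("buffer returned to pool")
}
```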
|
||||
|
||||
func (l *lengthField) decode(pd packetDecoder) error {
|
||||
var err error
|
||||
l.length, err = pd.getInt32()
|
||||
|
|
|
@ -19,6 +19,10 @@ func (r *ListGroupsRequest) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (r *ListGroupsRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
||||
|
|
|
@ -64,6 +64,10 @@ func (r *ListGroupsResponse) version() int16 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (r *ListGroupsResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
|
||||
return V0_9_0_0
|
||||
}
|
||||
|
|
98 vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go generated vendored Normal file
|
@ -0,0 +1,98 @@
|
|||
package sarama
|
||||
|
||||
type ListPartitionReassignmentsRequest struct {
|
||||
TimeoutMs int32
|
||||
blocks map[string][]int32
|
||||
Version int16
|
||||
}
|
||||
|
||||
func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error {
|
||||
pe.putInt32(r.TimeoutMs)
|
||||
|
||||
pe.putCompactArrayLength(len(r.blocks))
|
||||
|
||||
for topic, partitions := range r.blocks {
|
||||
if err := pe.putCompactString(topic); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := pe.putCompactInt32Array(partitions); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pe.putEmptyTaggedFieldArray()
|
||||
}
|
||||
|
||||
pe.putEmptyTaggedFieldArray()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Version = version
|
||||
|
||||
if r.TimeoutMs, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
topicCount, err := pd.getCompactArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if topicCount > 0 {
|
||||
r.blocks = make(map[string][]int32)
|
||||
for i := 0; i < topicCount; i++ {
|
||||
topic, err := pd.getCompactString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
partitionCount, err := pd.getCompactArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.blocks[topic] = make([]int32, partitionCount)
|
||||
for j := 0; j < partitionCount; j++ {
|
||||
partition, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.blocks[topic][j] = partition
|
||||
}
|
||||
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (r *ListPartitionReassignmentsRequest) key() int16 {
|
||||
return 46
|
||||
}
|
||||
|
||||
func (r *ListPartitionReassignmentsRequest) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *ListPartitionReassignmentsRequest) headerVersion() int16 {
|
||||
return 2
|
||||
}
|
||||
|
||||
func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
|
||||
return V2_4_0_0
|
||||
}
|
||||
|
||||
func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) {
|
||||
if r.blocks == nil {
|
||||
r.blocks = make(map[string][]int32)
|
||||
}
|
||||
|
||||
if r.blocks[topic] == nil {
|
||||
r.blocks[topic] = partitionIDs
|
||||
}
|
||||
}
|
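
ListPartitionReassignmentsRequest above uses the compact (flexible-version) encoding: API key 46, request header version 2, and a minimum broker version of 2.4.0. A minimal, hypothetical way to build one through the exported AddBlock helper (the topic name, timeout and partition IDs are illustrative):

    req := &ListPartitionReassignmentsRequest{TimeoutMs: 10000, Version: 0}
    req.AddBlock("my-topic", []int32{0, 1, 2})
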
169 vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go generated vendored Normal file
|
@ -0,0 +1,169 @@
|
|||
package sarama
|
||||
|
||||
type PartitionReplicaReassignmentsStatus struct {
|
||||
Replicas []int32
|
||||
AddingReplicas []int32
|
||||
RemovingReplicas []int32
|
||||
}
|
||||
|
||||
func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error {
|
||||
if err := pe.putCompactInt32Array(b.Replicas); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putCompactInt32Array(b.AddingReplicas); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pe.putCompactInt32Array(b.RemovingReplicas); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pe.putEmptyTaggedFieldArray()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) {
|
||||
if b.Replicas, err = pd.getCompactInt32Array(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.AddingReplicas, err = pd.getCompactInt32Array(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.RemovingReplicas, err = pd.getCompactInt32Array(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
type ListPartitionReassignmentsResponse struct {
|
||||
Version int16
|
||||
ThrottleTimeMs int32
|
||||
ErrorCode KError
|
||||
ErrorMessage *string
|
||||
TopicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus
|
||||
}
|
||||
|
||||
func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) {
|
||||
if r.TopicStatus == nil {
|
||||
r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus)
|
||||
}
|
||||
partitions := r.TopicStatus[topic]
|
||||
if partitions == nil {
|
||||
partitions = make(map[int32]*PartitionReplicaReassignmentsStatus)
|
||||
r.TopicStatus[topic] = partitions
|
||||
}
|
||||
|
||||
partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas}
|
||||
}
|
||||
|
||||
func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error {
|
||||
pe.putInt32(r.ThrottleTimeMs)
|
||||
pe.putInt16(int16(r.ErrorCode))
|
||||
if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pe.putCompactArrayLength(len(r.TopicStatus))
|
||||
for topic, partitions := range r.TopicStatus {
|
||||
if err := pe.putCompactString(topic); err != nil {
|
||||
return err
|
||||
}
|
||||
pe.putCompactArrayLength(len(partitions))
|
||||
for partition, block := range partitions {
|
||||
pe.putInt32(partition)
|
||||
|
||||
if err := block.encode(pe); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
pe.putEmptyTaggedFieldArray()
|
||||
}
|
||||
|
||||
pe.putEmptyTaggedFieldArray()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
|
||||
r.Version = version
|
||||
|
||||
if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
kerr, err := pd.getInt16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.ErrorCode = KError(kerr)
|
||||
|
||||
if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numTopics, err := pd.getCompactArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics)
|
||||
for i := 0; i < numTopics; i++ {
|
||||
topic, err := pd.getCompactString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ongoingPartitionReassignments, err := pd.getCompactArrayLength()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments)
|
||||
|
||||
for j := 0; j < ongoingPartitionReassignments; j++ {
|
||||
partition, err := pd.getInt32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
block := &PartitionReplicaReassignmentsStatus{}
|
||||
if err := block.decode(pd); err != nil {
|
||||
return err
|
||||
}
|
||||
r.TopicStatus[topic][partition] = block
|
||||
}
|
||||
|
||||
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ListPartitionReassignmentsResponse) key() int16 {
|
||||
return 46
|
||||
}
|
||||
|
||||
func (r *ListPartitionReassignmentsResponse) version() int16 {
|
||||
return r.Version
|
||||
}
|
||||
|
||||
func (r *ListPartitionReassignmentsResponse) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
|
||||
return V2_4_0_0
|
||||
}
|
|
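
On the response side, TopicStatus maps topic to partition to *PartitionReplicaReassignmentsStatus, and AddBlock fills in one entry. A short fragment showing how a test might populate and read it (values are illustrative, and it assumes "fmt" is imported in the surrounding file):

    res := &ListPartitionReassignmentsResponse{}
    res.AddBlock("my-topic", 0, []int32{1, 2, 3}, []int32{4}, []int32{1})

    for topic, partitions := range res.TopicStatus {
        for partition, status := range partitions {
            fmt.Println(topic, partition, status.Replicas, status.AddingReplicas, status.RemovingReplicas)
        }
    }
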
@ -1,46 +1,52 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
)
|
||||
|
||||
"github.com/eapache/go-xerial-snappy"
|
||||
"github.com/pierrec/lz4"
|
||||
const (
|
||||
//CompressionNone no compression
|
||||
CompressionNone CompressionCodec = iota
|
||||
//CompressionGZIP compression using GZIP
|
||||
CompressionGZIP
|
||||
//CompressionSnappy compression using snappy
|
||||
CompressionSnappy
|
||||
//CompressionLZ4 compression using LZ4
|
||||
CompressionLZ4
|
||||
//CompressionZSTD compression using ZSTD
|
||||
CompressionZSTD
|
||||
|
||||
// The lowest 3 bits contain the compression codec used for the message
|
||||
compressionCodecMask int8 = 0x07
|
||||
|
||||
// Bit 3 set for "LogAppend" timestamps
|
||||
timestampTypeMask = 0x08
|
||||
|
||||
// CompressionLevelDefault is the constant to use in CompressionLevel
|
||||
// to have the default compression level for any codec. The value is picked
|
||||
// that we don't use any existing compression levels.
|
||||
CompressionLevelDefault = -1000
|
||||
)
|
||||
|
||||
// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
|
||||
type CompressionCodec int8
|
||||
|
||||
// only the last two bits are really used
|
||||
const compressionCodecMask int8 = 0x03
|
||||
|
||||
const (
|
||||
CompressionNone CompressionCodec = 0
|
||||
CompressionGZIP CompressionCodec = 1
|
||||
CompressionSnappy CompressionCodec = 2
|
||||
CompressionLZ4 CompressionCodec = 3
|
||||
)
|
||||
|
||||
func (cc CompressionCodec) String() string {
|
||||
return []string{
|
||||
"none",
|
||||
"gzip",
|
||||
"snappy",
|
||||
"lz4",
|
||||
"zstd",
|
||||
}[int(cc)]
|
||||
}
|
||||
|
||||
// CompressionLevelDefault is the constant to use in CompressionLevel
|
||||
// to have the default compression level for any codec. The value is picked
|
||||
// that we don't use any existing compression levels.
|
||||
const CompressionLevelDefault = -1000
|
||||
|
||||
//Message is a kafka message type
|
||||
type Message struct {
|
||||
Codec CompressionCodec // codec used to compress the message contents
|
||||
CompressionLevel int // compression level
|
||||
LogAppendTime bool // the used timestamp is LogAppendTime
|
||||
Key []byte // the message key, may be nil
|
||||
Value []byte // the message contents
|
||||
Set *MessageSet // the message set a message might wrap
|
||||
|
@ -57,6 +63,9 @@ func (m *Message) encode(pe packetEncoder) error {
|
|||
pe.putInt8(m.Version)
|
||||
|
||||
attributes := int8(m.Codec) & compressionCodecMask
|
||||
if m.LogAppendTime {
|
||||
attributes |= timestampTypeMask
|
||||
}
|
||||
pe.putInt8(attributes)
|
||||
|
||||
if m.Version >= 1 {
|
||||
|
@ -76,47 +85,11 @@ func (m *Message) encode(pe packetEncoder) error {
|
|||
payload = m.compressedCache
|
||||
m.compressedCache = nil
|
||||
} else if m.Value != nil {
|
||||
switch m.Codec {
|
||||
case CompressionNone:
|
||||
payload = m.Value
|
||||
case CompressionGZIP:
|
||||
var buf bytes.Buffer
|
||||
var writer *gzip.Writer
|
||||
if m.CompressionLevel != CompressionLevelDefault {
|
||||
writer, err = gzip.NewWriterLevel(&buf, m.CompressionLevel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
writer = gzip.NewWriter(&buf)
|
||||
}
|
||||
if _, err = writer.Write(m.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = writer.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
m.compressedCache = buf.Bytes()
|
||||
payload = m.compressedCache
|
||||
case CompressionSnappy:
|
||||
tmp := snappy.Encode(m.Value)
|
||||
m.compressedCache = tmp
|
||||
payload = m.compressedCache
|
||||
case CompressionLZ4:
|
||||
var buf bytes.Buffer
|
||||
writer := lz4.NewWriter(&buf)
|
||||
if _, err = writer.Write(m.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = writer.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
m.compressedCache = buf.Bytes()
|
||||
payload = m.compressedCache
|
||||
|
||||
default:
|
||||
return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)}
|
||||
payload, err = compress(m.Codec, m.CompressionLevel, m.Value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.compressedCache = payload
|
||||
// Keep in mind the compressed payload size for metric gathering
|
||||
m.compressedSize = len(payload)
|
||||
}
|
||||
|
@ -129,7 +102,10 @@ func (m *Message) encode(pe packetEncoder) error {
|
|||
}
|
||||
|
||||
func (m *Message) decode(pd packetDecoder) (err error) {
|
||||
err = pd.push(newCRC32Field(crcIEEE))
|
||||
crc32Decoder := acquireCrc32Field(crcIEEE)
|
||||
defer releaseCrc32Field(crc32Decoder)
|
||||
|
||||
err = pd.push(crc32Decoder)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -148,6 +124,7 @@ func (m *Message) decode(pd packetDecoder) (err error) {
|
|||
return err
|
||||
}
|
||||
m.Codec = CompressionCodec(attribute & compressionCodecMask)
|
||||
m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask
|
||||
|
||||
if m.Version == 1 {
|
||||
if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
|
||||
|
@ -172,50 +149,24 @@ func (m *Message) decode(pd packetDecoder) (err error) {
|
|||
switch m.Codec {
|
||||
case CompressionNone:
|
||||
// nothing to do
|
||||
case CompressionGZIP:
|
||||
default:
|
||||
if m.Value == nil {
|
||||
break
|
||||
}
|
||||
reader, err := gzip.NewReader(bytes.NewReader(m.Value))
|
||||
|
||||
m.Value, err = decompress(m.Codec, m.Value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if m.Value, err = ioutil.ReadAll(reader); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.decodeSet(); err != nil {
|
||||
return err
|
||||
}
|
||||
case CompressionSnappy:
|
||||
if m.Value == nil {
|
||||
break
|
||||
}
|
||||
if m.Value, err = snappy.Decode(m.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.decodeSet(); err != nil {
|
||||
return err
|
||||
}
|
||||
case CompressionLZ4:
|
||||
if m.Value == nil {
|
||||
break
|
||||
}
|
||||
reader := lz4.NewReader(bytes.NewReader(m.Value))
|
||||
if m.Value, err = ioutil.ReadAll(reader); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.decodeSet(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
|
||||
}
|
||||
|
||||
return pd.pop()
|
||||
}
|
||||
|
||||
// decodes a message set from a previousy encoded bulk-message
|
||||
// decodes a message set from a previously encoded bulk-message
|
||||
func (m *Message) decodeSet() (err error) {
|
||||
pd := realDecoder{raw: m.Value}
|
||||
m.Set = &MessageSet{}
|
||||
|
|
|
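
The message.go hunks above drop the per-codec gzip/snappy/lz4 branches from Message.encode and Message.decode in favour of shared helpers. Their signatures are inferred from the call sites in this diff (compress takes codec, level and payload; decompress takes codec and payload), so this is a sketch of the call pattern rather than of the helpers themselves:

    // encode: compress the value once and cache it for re-encoding
    payload, err := compress(m.Codec, m.CompressionLevel, m.Value)
    if err != nil {
        return err
    }
    m.compressedCache = payload
    m.compressedSize = len(payload)

    // decode: any codec other than CompressionNone goes through decompress
    m.Value, err = decompress(m.Codec, m.Value)
    if err != nil {
        return err
    }
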
@ -29,7 +29,10 @@ func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
|
|||
return err
|
||||
}
|
||||
|
||||
if err = pd.push(&lengthField{}); err != nil {
|
||||
lengthDecoder := acquireLengthField()
|
||||
defer releaseLengthField(lengthDecoder)
|
||||
|
||||
if err = pd.push(lengthDecoder); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
@ -37,15 +37,8 @@ func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if size < 0 {
|
||||
return nil
|
||||
} else {
|
||||
topicCount := size
|
||||
if topicCount == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
r.Topics = make([]string, topicCount)
|
||||
if size > 0 {
|
||||
r.Topics = make([]string, size)
|
||||
for i := range r.Topics {
|
||||
topic, err := pd.getString()
|
||||
if err != nil {
|
||||
|
@ -72,6 +65,10 @@ func (r *MetadataRequest) version() int16 {
|
|||
return r.Version
|
||||
}
|
||||
|
||||
func (r *MetadataRequest) headerVersion() int16 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *MetadataRequest) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 1:
|
||||
|
|
|
@ -255,6 +255,10 @@ func (r *MetadataResponse) version() int16 {
|
|||
return r.Version
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) headerVersion() int16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) requiredVersion() KafkaVersion {
|
||||
switch r.Version {
|
||||
case 1:
|
||||
|
@ -296,7 +300,7 @@ foundTopic:
|
|||
return tmatch
|
||||
}
|
||||
|
||||
func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
|
||||
func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) {
|
||||
tmatch := r.AddTopic(topic, ErrNoError)
|
||||
var pmatch *PartitionMetadata
|
||||
|
||||
|
@ -316,6 +320,6 @@ foundPartition:
|
|||
pmatch.Leader = brokerID
|
||||
pmatch.Replicas = replicas
|
||||
pmatch.Isr = isr
|
||||
pmatch.OfflineReplicas = offline
|
||||
pmatch.Err = err
|
||||
|
||||
}
|
||||
|
|
|
@ -28,14 +28,6 @@ func getMetricNameForBroker(name string, broker *Broker) string {
|
|||
return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
|
||||
}
|
||||
|
||||
func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
|
||||
return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
|
||||
}
|
||||
|
||||
func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
|
||||
return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
|
||||
}
|
||||
|
||||
func getMetricNameForTopic(name string, topic string) string {
|
||||
// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
|
||||
// cf. KAFKA-1902 and KAFKA-2337
|
||||
|
|
|
@ -18,7 +18,9 @@ const (
|
|||
expectationTimeout = 500 * time.Millisecond
|
||||
)
|
||||
|
||||
type requestHandlerFunc func(req *request) (res encoder)
|
||||
type GSSApiHandlerFunc func([]byte) []byte
|
||||
|
||||
type requestHandlerFunc func(req *request) (res encoderWithHeader)
|
||||
|
||||
// RequestNotifierFunc is invoked when a mock broker processes a request successfully
|
||||
// and provides the number of bytes read and written.
|
||||
|
@ -49,18 +51,19 @@ type RequestNotifierFunc func(bytesRead, bytesWritten int)
|
|||
// It is not necessary to prefix message length or correlation ID to your
|
||||
// response bytes, the server does that automatically as a convenience.
|
||||
type MockBroker struct {
|
||||
brokerID int32
|
||||
port int32
|
||||
closing chan none
|
||||
stopper chan none
|
||||
expectations chan encoder
|
||||
listener net.Listener
|
||||
t TestReporter
|
||||
latency time.Duration
|
||||
handler requestHandlerFunc
|
||||
notifier RequestNotifierFunc
|
||||
history []RequestResponse
|
||||
lock sync.Mutex
|
||||
brokerID int32
|
||||
port int32
|
||||
closing chan none
|
||||
stopper chan none
|
||||
expectations chan encoderWithHeader
|
||||
listener net.Listener
|
||||
t TestReporter
|
||||
latency time.Duration
|
||||
handler requestHandlerFunc
|
||||
notifier RequestNotifierFunc
|
||||
history []RequestResponse
|
||||
lock sync.Mutex
|
||||
gssApiHandler GSSApiHandlerFunc
|
||||
}
|
||||
|
||||
// RequestResponse represents a Request/Response pair processed by MockBroker.
|
||||
|
@ -80,7 +83,7 @@ func (b *MockBroker) SetLatency(latency time.Duration) {
|
|||
// and uses the found MockResponse instance to generate an appropriate reply.
|
||||
// If the request type is not found in the map then nothing is sent.
|
||||
func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
|
||||
b.setHandler(func(req *request) (res encoder) {
|
||||
b.setHandler(func(req *request) (res encoderWithHeader) {
|
||||
reqTypeName := reflect.TypeOf(req.body).Elem().Name()
|
||||
mockResponse := handlerMap[reqTypeName]
|
||||
if mockResponse == nil {
|
||||
|
@ -173,7 +176,44 @@ func (b *MockBroker) serverLoop() {
|
|||
Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
|
||||
}
|
||||
|
||||
func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
|
||||
func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) {
|
||||
b.gssApiHandler = handler
|
||||
}
|
||||
|
||||
func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) {
|
||||
var (
|
||||
bytesRead int
|
||||
lengthBytes = make([]byte, 4)
|
||||
)
|
||||
|
||||
if _, err := io.ReadFull(r, lengthBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bytesRead += len(lengthBytes)
|
||||
length := int32(binary.BigEndian.Uint32(lengthBytes))
|
||||
|
||||
if length <= 4 || length > MaxRequestSize {
|
||||
return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
|
||||
}
|
||||
|
||||
encodedReq := make([]byte, length)
|
||||
if _, err := io.ReadFull(r, encodedReq); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bytesRead += len(encodedReq)
|
||||
|
||||
fullBytes := append(lengthBytes, encodedReq...)
|
||||
|
||||
return fullBytes, nil
|
||||
}
|
||||
|
||||
func (b *MockBroker) isGSSAPI(buffer []byte) bool {
|
||||
return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04})
|
||||
}
|
||||
|
||||
func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
defer func() {
|
||||
_ = conn.Close()
|
||||
|
@ -191,65 +231,110 @@ func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup)
|
|||
}
|
||||
}()
|
||||
|
||||
resHeader := make([]byte, 8)
|
||||
var bytesWritten int
|
||||
var bytesRead int
|
||||
for {
|
||||
req, bytesRead, err := decodeRequest(conn)
|
||||
buffer, err := b.readToBytes(conn)
|
||||
if err != nil {
|
||||
Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
|
||||
Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
|
||||
b.serverError(err)
|
||||
break
|
||||
}
|
||||
|
||||
if b.latency > 0 {
|
||||
time.Sleep(b.latency)
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
res := b.handler(req)
|
||||
b.history = append(b.history, RequestResponse{req.body, res})
|
||||
b.lock.Unlock()
|
||||
|
||||
if res == nil {
|
||||
Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
|
||||
continue
|
||||
}
|
||||
Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
|
||||
|
||||
encodedRes, err := encode(res, nil)
|
||||
if err != nil {
|
||||
b.serverError(err)
|
||||
break
|
||||
}
|
||||
if len(encodedRes) == 0 {
|
||||
b.lock.Lock()
|
||||
if b.notifier != nil {
|
||||
b.notifier(bytesRead, 0)
|
||||
bytesWritten = 0
|
||||
if !b.isGSSAPI(buffer) {
|
||||
req, br, err := decodeRequest(bytes.NewReader(buffer))
|
||||
bytesRead = br
|
||||
if err != nil {
|
||||
Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
|
||||
b.serverError(err)
|
||||
break
|
||||
}
|
||||
b.lock.Unlock()
|
||||
continue
|
||||
}
|
||||
|
||||
binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
|
||||
binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
|
||||
if _, err = conn.Write(resHeader); err != nil {
|
||||
b.serverError(err)
|
||||
break
|
||||
}
|
||||
if _, err = conn.Write(encodedRes); err != nil {
|
||||
b.serverError(err)
|
||||
break
|
||||
if b.latency > 0 {
|
||||
time.Sleep(b.latency)
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
res := b.handler(req)
|
||||
b.history = append(b.history, RequestResponse{req.body, res})
|
||||
b.lock.Unlock()
|
||||
|
||||
if res == nil {
|
||||
Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
|
||||
continue
|
||||
}
|
||||
Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
|
||||
|
||||
encodedRes, err := encode(res, nil)
|
||||
if err != nil {
|
||||
b.serverError(err)
|
||||
break
|
||||
}
|
||||
if len(encodedRes) == 0 {
|
||||
b.lock.Lock()
|
||||
if b.notifier != nil {
|
||||
b.notifier(bytesRead, 0)
|
||||
}
|
||||
b.lock.Unlock()
|
||||
continue
|
||||
}
|
||||
|
||||
resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes)))
|
||||
if _, err = conn.Write(resHeader); err != nil {
|
||||
b.serverError(err)
|
||||
break
|
||||
}
|
||||
if _, err = conn.Write(encodedRes); err != nil {
|
||||
b.serverError(err)
|
||||
break
|
||||
}
|
||||
bytesWritten = len(resHeader) + len(encodedRes)
|
||||
} else {
|
||||
// GSSAPI is not part of the Kafka protocol, but is supported for authentication purposes.
|
||||
// History is not kept for this kind of request, as it is only used to test the GSSAPI authentication mechanism
|
||||
b.lock.Lock()
|
||||
res := b.gssApiHandler(buffer)
|
||||
b.lock.Unlock()
|
||||
if res == nil {
|
||||
Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer))
|
||||
continue
|
||||
}
|
||||
if _, err = conn.Write(res); err != nil {
|
||||
b.serverError(err)
|
||||
break
|
||||
}
|
||||
bytesWritten = len(res)
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
if b.notifier != nil {
|
||||
b.notifier(bytesRead, len(resHeader)+len(encodedRes))
|
||||
b.notifier(bytesRead, bytesWritten)
|
||||
}
|
||||
b.lock.Unlock()
|
||||
}
|
||||
Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
|
||||
}
|
||||
|
||||
func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
|
||||
func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte {
|
||||
headerLength := uint32(8)
|
||||
|
||||
if headerVersion >= 1 {
|
||||
headerLength = 9
|
||||
}
|
||||
|
||||
resHeader := make([]byte, headerLength)
|
||||
binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4)
|
||||
binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId))
|
||||
|
||||
if headerVersion >= 1 {
|
||||
binary.PutUvarint(resHeader[8:], 0)
|
||||
}
|
||||
|
||||
return resHeader
|
||||
}
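
encodeHeader frames every mock response as a 4-byte big-endian length, the 4-byte correlation id and, for header version 1 and above, one extra 0x00 byte for the empty tagged-field array; the length prefix counts everything after itself. A worked example with illustrative values:

    // headerVersion 1, correlationId 42, 10-byte payload:
    //   length prefix = 10 + 9 - 4 = 15 -> 00 00 00 0f
    //   correlation id = 42             -> 00 00 00 2a
    //   empty tagged fields (uvarint)   -> 00
    hdr := b.encodeHeader(1, 42, 10) // 9 header bytes, followed by the payload on the wire
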
|
||||
|
||||
func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) {
|
||||
select {
|
||||
case res, ok := <-b.expectations:
|
||||
if !ok {
|
||||
|
@ -304,7 +389,7 @@ func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener
|
|||
stopper: make(chan none),
|
||||
t: t,
|
||||
brokerID: brokerID,
|
||||
expectations: make(chan encoder, 512),
|
||||
expectations: make(chan encoderWithHeader, 512),
|
||||
listener: listener,
|
||||
}
|
||||
broker.handler = broker.defaultRequestHandler
|
||||
|
@ -325,6 +410,6 @@ func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener
|
|||
return broker
|
||||
}
|
||||
|
||||
func (b *MockBroker) Returns(e encoder) {
|
||||
func (b *MockBroker) Returns(e encoderWithHeader) {
|
||||
b.expectations <- e
|
||||
}
|
||||
|
|
|
@ -0,0 +1,123 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
|
||||
"gopkg.in/jcmturner/gokrb5.v7/credentials"
|
||||
"gopkg.in/jcmturner/gokrb5.v7/gssapi"
|
||||
"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
|
||||
"gopkg.in/jcmturner/gokrb5.v7/messages"
|
||||
"gopkg.in/jcmturner/gokrb5.v7/types"
|
||||
)
|
||||
|
||||
type KafkaGSSAPIHandler struct {
|
||||
client *MockKerberosClient
|
||||
badResponse bool
|
||||
badKeyChecksum bool
|
||||
}
|
||||
|
||||
func (h *KafkaGSSAPIHandler) MockKafkaGSSAPI(buffer []byte) []byte {
|
||||
// Default payload used for verify
|
||||
err := h.client.Login() // Mock client construct keys when login
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if h.badResponse { // Returns trash
|
||||
return []byte{0x00, 0x00, 0x00, 0x01, 0xAD}
|
||||
}
|
||||
|
||||
var pack = gssapi.WrapToken{
|
||||
Flags: KRB5_USER_AUTH,
|
||||
EC: 12,
|
||||
RRC: 0,
|
||||
SndSeqNum: 3398292281,
|
||||
Payload: []byte{0x11, 0x00}, // 1100
|
||||
}
|
||||
// Compute checksum
|
||||
if h.badKeyChecksum {
|
||||
pack.CheckSum = []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
|
||||
} else {
|
||||
err = pack.SetCheckSum(h.client.ASRep.DecryptedEncPart.Key, keyusage.GSSAPI_ACCEPTOR_SEAL)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
packBytes, err := pack.Marshal()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
lenBytes := len(packBytes)
|
||||
response := make([]byte, lenBytes+4)
|
||||
copy(response[4:], packBytes)
|
||||
binary.BigEndian.PutUint32(response, uint32(lenBytes))
|
||||
return response
|
||||
}
|
||||
|
||||
type MockKerberosClient struct {
|
||||
asRepBytes string
|
||||
ASRep messages.ASRep
|
||||
credentials *credentials.Credentials
|
||||
mockError error
|
||||
errorStage string
|
||||
}
|
||||
|
||||
func (c *MockKerberosClient) Login() error {
|
||||
if c.errorStage == "login" && c.mockError != nil {
|
||||
return c.mockError
|
||||
}
|
||||
c.asRepBytes = "6b8202e9308202e5a003020105a10302010ba22b30293027a103020113a220041e301c301aa003020112a1131b114" +
|
||||
"558414d504c452e434f4d636c69656e74a30d1b0b4558414d504c452e434f4da4133011a003020101a10a30081b06636c69656e7" +
|
||||
"4a5820156618201523082014ea003020105a10d1b0b4558414d504c452e434f4da220301ea003020102a11730151b066b7262746" +
|
||||
"7741b0b4558414d504c452e434f4da382011430820110a003020112a103020101a28201020481ffdb9891175d106818e61008c51" +
|
||||
"d0b3462bca92f3bf9d4cfa82de4c4d7aff9994ec87c573e3a3d54dcb2bb79618c76f2bf4a3d006f90d5bdbd049bc18f48be39203" +
|
||||
"549ca02acaf63f292b12404f9b74c34b83687119d8f56552ccc0c50ebee2a53bb114c1b4619bb1d5d31f0f49b4d40a08a9b4c046" +
|
||||
"2e1398d0b648be1c0e50c552ad16e1d8d8e74263dd0bf0ec591e4797dfd40a9a1be4ae830d03a306e053fd7586fef84ffc5e4a83" +
|
||||
"7c3122bf3e6a40fe87e84019f6283634461b955712b44a5f7386c278bff94ec2c2dc0403247e29c2450e853471ceababf9b8911f" +
|
||||
"997f2e3010b046d2c49eb438afb0f4c210821e80d4ffa4c9521eb895dcd68610b3feaa682012c30820128a003020112a282011f0" +
|
||||
"482011bce73cbce3f1dd17661c412005f0f2257c756fe8e98ff97e6ec24b7bab66e5fd3a3827aeeae4757af0c6e892948122d8b2" +
|
||||
"03c8df48df0ef5d142d0e416d688f11daa0fcd63d96bdd431d02b8e951c664eeff286a2be62383d274a04016d5f0e141da58cb86" +
|
||||
"331de64063062f4f885e8e9ce5b181ca2fdc67897c5995e0ae1ae0c171a64493ff7bd91bc6d89cd4fce1e2b3ea0a10e34b0d5eda" +
|
||||
"aa38ee727b50c5632ed1d2f2b457908e616178d0d80b72af209fb8ac9dbaa1768fa45931392b36b6d8c12400f8ded2efaa0654d0" +
|
||||
"da1db966e8b5aab4706c800f95d559664646041fdb38b411c62fc0fbe0d25083a28562b0e1c8df16e62e9d5626b0addee489835f" +
|
||||
"eedb0f26c05baa596b69b17f47920aa64b29dc77cfcc97ba47885"
|
||||
apRepBytes, err := hex.DecodeString(c.asRepBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = c.ASRep.Unmarshal(apRepBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.credentials = credentials.New("client", "EXAMPLE.COM").WithPassword("qwerty")
|
||||
_, err = c.ASRep.DecryptEncPart(c.credentials)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *MockKerberosClient) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) {
|
||||
if c.errorStage == "service_ticket" && c.mockError != nil {
|
||||
return messages.Ticket{}, types.EncryptionKey{}, c.mockError
|
||||
}
|
||||
return c.ASRep.Ticket, c.ASRep.DecryptedEncPart.Key, nil
|
||||
}
|
||||
|
||||
func (c *MockKerberosClient) Domain() string {
|
||||
return "EXAMPLE.COM"
|
||||
}
|
||||
func (c *MockKerberosClient) CName() types.PrincipalName {
|
||||
var p = types.PrincipalName{
|
||||
NameType: KRB5_USER_AUTH,
|
||||
NameString: []string{
|
||||
"kafka",
|
||||
"kafka",
|
||||
},
|
||||
}
|
||||
return p
|
||||
}
|
||||
func (c *MockKerberosClient) Destroy() {
|
||||
// Do nothing.
|
||||
}
|
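
These Kerberos mocks plug into the GSSAPI hook added to MockBroker earlier in this diff. A hypothetical test-side wiring (it has to live inside package sarama because the handler fields are unexported; t is a TestReporter, and NewMockBroker/Close are the existing mock-broker constructor and shutdown, not shown in this hunk):

    broker := NewMockBroker(t, 1)
    defer broker.Close()

    handler := &KafkaGSSAPIHandler{client: &MockKerberosClient{}}
    broker.SetGSSAPIHandler(handler.MockKafkaGSSAPI)
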
|
@ -2,6 +2,7 @@ package sarama
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// TestReporter has methods matching go's testing.T to avoid importing
|
||||
|
@ -17,20 +18,20 @@ type TestReporter interface {
|
|||
// allows generating a response based on a request body. MockResponses are used
|
||||
// to program behavior of MockBroker in tests.
|
||||
type MockResponse interface {
|
||||
For(reqBody versionedDecoder) (res encoder)
|
||||
For(reqBody versionedDecoder) (res encoderWithHeader)
|
||||
}
|
||||
|
||||
// MockWrapper is a mock response builder that returns a particular concrete
|
||||
// response regardless of the actual request passed to the `For` method.
|
||||
type MockWrapper struct {
|
||||
res encoder
|
||||
res encoderWithHeader
|
||||
}
|
||||
|
||||
func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
|
||||
func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) {
|
||||
return mw.res
|
||||
}
|
||||
|
||||
func NewMockWrapper(res encoder) *MockWrapper {
|
||||
func NewMockWrapper(res encoderWithHeader) *MockWrapper {
|
||||
return &MockWrapper{res: res}
|
||||
}
|
||||
|
||||
|
@ -49,7 +50,7 @@ func NewMockSequence(responses ...interface{}) *MockSequence {
|
|||
switch res := res.(type) {
|
||||
case MockResponse:
|
||||
ms.responses[i] = res
|
||||
case encoder:
|
||||
case encoderWithHeader:
|
||||
ms.responses[i] = NewMockWrapper(res)
|
||||
default:
|
||||
panic(fmt.Sprintf("Unexpected response type: %T", res))
|
||||
|
@ -58,7 +59,7 @@ func NewMockSequence(responses ...interface{}) *MockSequence {
|
|||
return ms
|
||||
}
|
||||
|
||||
func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
|
||||
func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) {
|
||||
res = mc.responses[0].For(reqBody)
|
||||
if len(mc.responses) > 1 {
|
||||
mc.responses = mc.responses[1:]
|
||||
|
@ -66,6 +67,69 @@ func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
|
|||
return res
|
||||
}
|
||||
|
||||
type MockListGroupsResponse struct {
|
||||
groups map[string]string
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse {
|
||||
return &MockListGroupsResponse{
|
||||
groups: make(map[string]string),
|
||||
t: t,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
request := reqBody.(*ListGroupsRequest)
|
||||
_ = request
|
||||
response := &ListGroupsResponse{
|
||||
Groups: m.groups,
|
||||
}
|
||||
return response
|
||||
}
|
||||
|
||||
func (m *MockListGroupsResponse) AddGroup(groupID, protocolType string) *MockListGroupsResponse {
|
||||
m.groups[groupID] = protocolType
|
||||
return m
|
||||
}
|
||||
|
||||
type MockDescribeGroupsResponse struct {
|
||||
groups map[string]*GroupDescription
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockDescribeGroupsResponse(t TestReporter) *MockDescribeGroupsResponse {
|
||||
return &MockDescribeGroupsResponse{
|
||||
t: t,
|
||||
groups: make(map[string]*GroupDescription),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, description *GroupDescription) *MockDescribeGroupsResponse {
|
||||
m.groups[groupID] = description
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
request := reqBody.(*DescribeGroupsRequest)
|
||||
|
||||
response := &DescribeGroupsResponse{}
|
||||
for _, requestedGroup := range request.Groups {
|
||||
if group, ok := m.groups[requestedGroup]; ok {
|
||||
response.Groups = append(response.Groups, group)
|
||||
} else {
|
||||
// Mimic real kafka - if a group doesn't exist, return
|
||||
// an entry with state "Dead"
|
||||
response.Groups = append(response.Groups, &GroupDescription{
|
||||
GroupId: requestedGroup,
|
||||
State: "Dead",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return response
|
||||
}
|
||||
|
||||
// MockMetadataResponse is a `MetadataResponse` builder.
|
||||
type MockMetadataResponse struct {
|
||||
controllerID int32
|
||||
|
@ -102,7 +166,7 @@ func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResp
|
|||
return mmr
|
||||
}
|
||||
|
||||
func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
|
||||
func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
metadataRequest := reqBody.(*MetadataRequest)
|
||||
metadataResponse := &MetadataResponse{
|
||||
Version: metadataRequest.version(),
|
||||
|
@ -111,17 +175,25 @@ func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
|
|||
for addr, brokerID := range mmr.brokers {
|
||||
metadataResponse.AddBroker(addr, brokerID)
|
||||
}
|
||||
|
||||
// Generate set of replicas
|
||||
var replicas []int32
|
||||
var offlineReplicas []int32
|
||||
for _, brokerID := range mmr.brokers {
|
||||
replicas = append(replicas, brokerID)
|
||||
}
|
||||
|
||||
if len(metadataRequest.Topics) == 0 {
|
||||
for topic, partitions := range mmr.leaders {
|
||||
for partition, brokerID := range partitions {
|
||||
metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
|
||||
metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
|
||||
}
|
||||
}
|
||||
return metadataResponse
|
||||
}
|
||||
for _, topic := range metadataRequest.Topics {
|
||||
for partition, brokerID := range mmr.leaders[topic] {
|
||||
metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
|
||||
metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
|
||||
}
|
||||
}
|
||||
return metadataResponse
|
||||
|
@ -161,7 +233,7 @@ func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, of
|
|||
return mor
|
||||
}
|
||||
|
||||
func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
|
||||
func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
offsetRequest := reqBody.(*OffsetRequest)
|
||||
offsetResponse := &OffsetResponse{Version: mor.version}
|
||||
for topic, partitions := range offsetRequest.blocks {
|
||||
|
@ -237,7 +309,7 @@ func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, of
|
|||
return mfr
|
||||
}
|
||||
|
||||
func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
|
||||
func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
fetchRequest := reqBody.(*FetchRequest)
|
||||
res := &FetchResponse{
|
||||
Version: mfr.version,
|
||||
|
@ -321,7 +393,7 @@ func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *M
|
|||
return mr
|
||||
}
|
||||
|
||||
func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
|
||||
func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*ConsumerMetadataRequest)
|
||||
group := req.ConsumerGroup
|
||||
res := &ConsumerMetadataResponse{}
|
||||
|
@ -370,7 +442,7 @@ func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType,
|
|||
return mr
|
||||
}
|
||||
|
||||
func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoder {
|
||||
func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*FindCoordinatorRequest)
|
||||
res := &FindCoordinatorResponse{}
|
||||
var v interface{}
|
||||
|
@ -417,7 +489,7 @@ func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int3
|
|||
return mr
|
||||
}
|
||||
|
||||
func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
|
||||
func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*OffsetCommitRequest)
|
||||
group := req.ConsumerGroup
|
||||
res := &OffsetCommitResponse{}
|
||||
|
@ -474,7 +546,7 @@ func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KE
|
|||
return mr
|
||||
}
|
||||
|
||||
func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
|
||||
func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*ProduceRequest)
|
||||
res := &ProduceResponse{
|
||||
Version: mr.version,
|
||||
|
@ -502,6 +574,7 @@ func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
|
|||
// MockOffsetFetchResponse is a `OffsetFetchResponse` builder.
|
||||
type MockOffsetFetchResponse struct {
|
||||
offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
|
||||
error KError
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
|
@ -523,19 +596,29 @@ func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int3
|
|||
partitions = make(map[int32]*OffsetFetchResponseBlock)
|
||||
topics[topic] = partitions
|
||||
}
|
||||
partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror}
|
||||
partitions[partition] = &OffsetFetchResponseBlock{offset, 0, metadata, kerror}
|
||||
return mr
|
||||
}
|
||||
|
||||
func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
|
||||
func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchResponse {
|
||||
mr.error = kerror
|
||||
return mr
|
||||
}
|
||||
|
||||
func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*OffsetFetchRequest)
|
||||
group := req.ConsumerGroup
|
||||
res := &OffsetFetchResponse{}
|
||||
res := &OffsetFetchResponse{Version: req.Version}
|
||||
|
||||
for topic, partitions := range mr.offsets[group] {
|
||||
for partition, block := range partitions {
|
||||
res.AddBlock(topic, partition, block)
|
||||
}
|
||||
}
|
||||
|
||||
if res.Version >= 2 {
|
||||
res.Err = mr.error
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
|
@ -547,12 +630,22 @@ func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse {
|
|||
return &MockCreateTopicsResponse{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoder {
|
||||
func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*CreateTopicsRequest)
|
||||
res := &CreateTopicsResponse{}
|
||||
res := &CreateTopicsResponse{
|
||||
Version: req.Version,
|
||||
}
|
||||
res.TopicErrors = make(map[string]*TopicError)
|
||||
|
||||
for topic, _ := range req.TopicDetails {
|
||||
for topic := range req.TopicDetails {
|
||||
if res.Version >= 1 && strings.HasPrefix(topic, "_") {
|
||||
msg := "insufficient permissions to create topic with reserved prefix"
|
||||
res.TopicErrors[topic] = &TopicError{
|
||||
Err: ErrTopicAuthorizationFailed,
|
||||
ErrMsg: &msg,
|
||||
}
|
||||
continue
|
||||
}
|
||||
res.TopicErrors[topic] = &TopicError{Err: ErrNoError}
|
||||
}
|
||||
return res
|
||||
|
@ -566,7 +659,7 @@ func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse {
|
|||
return &MockDeleteTopicsResponse{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoder {
|
||||
func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*DeleteTopicsRequest)
|
||||
res := &DeleteTopicsResponse{}
|
||||
res.TopicErrorCodes = make(map[string]KError)
|
||||
|
@ -574,6 +667,7 @@ func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoder {
|
|||
for _, topic := range req.Topics {
|
||||
res.TopicErrorCodes[topic] = ErrNoError
|
||||
}
|
||||
res.Version = req.Version
|
||||
return res
|
||||
}
|
||||
|
||||
|
@ -585,17 +679,62 @@ func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsRespon
|
|||
return &MockCreatePartitionsResponse{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoder {
|
||||
func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*CreatePartitionsRequest)
|
||||
res := &CreatePartitionsResponse{}
|
||||
res.TopicPartitionErrors = make(map[string]*TopicPartitionError)
|
||||
|
||||
for topic, _ := range req.TopicPartitions {
|
||||
for topic := range req.TopicPartitions {
|
||||
if strings.HasPrefix(topic, "_") {
|
||||
msg := "insufficient permissions to create partition on topic with reserved prefix"
|
||||
res.TopicPartitionErrors[topic] = &TopicPartitionError{
|
||||
Err: ErrTopicAuthorizationFailed,
|
||||
ErrMsg: &msg,
|
||||
}
|
||||
continue
|
||||
}
|
||||
res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
type MockAlterPartitionReassignmentsResponse struct {
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse {
|
||||
return &MockAlterPartitionReassignmentsResponse{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*AlterPartitionReassignmentsRequest)
|
||||
_ = req
|
||||
res := &AlterPartitionReassignmentsResponse{}
|
||||
return res
|
||||
}
|
||||
|
||||
type MockListPartitionReassignmentsResponse struct {
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse {
|
||||
return &MockListPartitionReassignmentsResponse{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*ListPartitionReassignmentsRequest)
|
||||
_ = req
|
||||
res := &ListPartitionReassignmentsResponse{}
|
||||
|
||||
for topic, partitions := range req.blocks {
|
||||
for _, partition := range partitions {
|
||||
res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2})
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
type MockDeleteRecordsResponse struct {
|
||||
t TestReporter
|
||||
}
|
||||
|
@ -604,14 +743,14 @@ func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse {
|
|||
return &MockDeleteRecordsResponse{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoder {
|
||||
func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*DeleteRecordsRequest)
|
||||
res := &DeleteRecordsResponse{}
|
||||
res.Topics = make(map[string]*DeleteRecordsResponseTopic)
|
||||
|
||||
for topic, deleteRecordRequestTopic := range req.Topics {
|
||||
partitions := make(map[int32]*DeleteRecordsResponsePartition)
|
||||
for partition, _ := range deleteRecordRequestTopic.PartitionOffsets {
|
||||
for partition := range deleteRecordRequestTopic.PartitionOffsets {
|
||||
partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError}
|
||||
}
|
||||
res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions}
|
||||
|
@ -627,20 +766,114 @@ func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse
|
|||
return &MockDescribeConfigsResponse{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoder {
|
||||
func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*DescribeConfigsRequest)
|
||||
res := &DescribeConfigsResponse{}
|
||||
res := &DescribeConfigsResponse{
|
||||
Version: req.Version,
|
||||
}
|
||||
|
||||
var configEntries []*ConfigEntry
|
||||
configEntries = append(configEntries, &ConfigEntry{Name: "my_topic",
|
||||
Value: "my_topic",
|
||||
ReadOnly: true,
|
||||
Default: true,
|
||||
Sensitive: false,
|
||||
})
|
||||
includeSynonyms := req.Version > 0
|
||||
includeSource := req.Version > 0
|
||||
|
||||
for _, r := range req.Resources {
|
||||
res.Resources = append(res.Resources, &ResourceResponse{Name: r.Name, Configs: configEntries})
|
||||
var configEntries []*ConfigEntry
|
||||
switch r.Type {
|
||||
case BrokerResource:
|
||||
configEntries = append(configEntries,
|
||||
&ConfigEntry{
|
||||
Name: "min.insync.replicas",
|
||||
Value: "2",
|
||||
ReadOnly: false,
|
||||
Default: false,
|
||||
},
|
||||
)
|
||||
res.Resources = append(res.Resources, &ResourceResponse{
|
||||
Name: r.Name,
|
||||
Configs: configEntries,
|
||||
})
|
||||
case BrokerLoggerResource:
|
||||
configEntries = append(configEntries,
|
||||
&ConfigEntry{
|
||||
Name: "kafka.controller.KafkaController",
|
||||
Value: "DEBUG",
|
||||
ReadOnly: false,
|
||||
Default: false,
|
||||
},
|
||||
)
|
||||
res.Resources = append(res.Resources, &ResourceResponse{
|
||||
Name: r.Name,
|
||||
Configs: configEntries,
|
||||
})
|
||||
case TopicResource:
|
||||
maxMessageBytes := &ConfigEntry{Name: "max.message.bytes",
|
||||
Value: "1000000",
|
||||
ReadOnly: false,
|
||||
Default: !includeSource,
|
||||
Sensitive: false,
|
||||
}
|
||||
if includeSource {
|
||||
maxMessageBytes.Source = SourceDefault
|
||||
}
|
||||
if includeSynonyms {
|
||||
maxMessageBytes.Synonyms = []*ConfigSynonym{
|
||||
{
|
||||
ConfigName: "max.message.bytes",
|
||||
ConfigValue: "500000",
|
||||
},
|
||||
}
|
||||
}
|
||||
retentionMs := &ConfigEntry{Name: "retention.ms",
|
||||
Value: "5000",
|
||||
ReadOnly: false,
|
||||
Default: false,
|
||||
Sensitive: false,
|
||||
}
|
||||
if includeSynonyms {
|
||||
retentionMs.Synonyms = []*ConfigSynonym{
|
||||
{
|
||||
ConfigName: "log.retention.ms",
|
||||
ConfigValue: "2500",
|
||||
},
|
||||
}
|
||||
}
|
||||
password := &ConfigEntry{Name: "password",
|
||||
Value: "12345",
|
||||
ReadOnly: false,
|
||||
Default: false,
|
||||
Sensitive: true,
|
||||
}
|
||||
configEntries = append(
|
||||
configEntries, maxMessageBytes, retentionMs, password)
|
||||
res.Resources = append(res.Resources, &ResourceResponse{
|
||||
Name: r.Name,
|
||||
Configs: configEntries,
|
||||
})
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
type MockDescribeConfigsResponseWithErrorCode struct {
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode {
|
||||
return &MockDescribeConfigsResponseWithErrorCode{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*DescribeConfigsRequest)
|
||||
res := &DescribeConfigsResponse{
|
||||
Version: req.Version,
|
||||
}
|
||||
|
||||
for _, r := range req.Resources {
|
||||
res.Resources = append(res.Resources, &ResourceResponse{
|
||||
Name: r.Name,
|
||||
Type: r.Type,
|
||||
ErrorCode: 83,
|
||||
ErrorMsg: "",
|
||||
})
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
@ -653,19 +886,42 @@ func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse {
|
|||
return &MockAlterConfigsResponse{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoder {
|
||||
func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*AlterConfigsRequest)
|
||||
res := &AlterConfigsResponse{}
|
||||
|
||||
for _, r := range req.Resources {
|
||||
res.Resources = append(res.Resources, &AlterConfigsResourceResponse{Name: r.Name,
|
||||
Type: TopicResource,
|
||||
Type: r.Type,
|
||||
ErrorMsg: "",
|
||||
})
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
type MockAlterConfigsResponseWithErrorCode struct {
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode {
|
||||
return &MockAlterConfigsResponseWithErrorCode{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*AlterConfigsRequest)
|
||||
res := &AlterConfigsResponse{}
|
||||
|
||||
for _, r := range req.Resources {
|
||||
res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
|
||||
Name: r.Name,
|
||||
Type: r.Type,
|
||||
ErrorCode: 83,
|
||||
ErrorMsg: "",
|
||||
})
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
type MockCreateAclsResponse struct {
|
||||
t TestReporter
|
||||
}
|
||||
|
@ -674,7 +930,7 @@ func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse {
|
|||
return &MockCreateAclsResponse{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoder {
|
||||
func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*CreateAclsRequest)
|
||||
res := &CreateAclsResponse{}
|
||||
|
||||
|
@ -692,29 +948,101 @@ func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse {
|
|||
return &MockListAclsResponse{t: t}
|
||||
}
|
||||
|
||||
func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoder {
|
||||
func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
|
||||
req := reqBody.(*DescribeAclsRequest)
|
||||
res := &DescribeAclsResponse{}
|
||||
|
||||
res.Err = ErrNoError
|
||||
acl := &ResourceAcls{}
|
||||
acl.Resource.ResourceName = *req.ResourceName
|
||||
if req.ResourceName != nil {
|
||||
acl.Resource.ResourceName = *req.ResourceName
|
||||
}
|
||||
acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter
|
||||
acl.Resource.ResourceType = req.ResourceType
|
||||
acl.Acls = append(acl.Acls, &Acl{})
|
||||
res.ResourceAcls = append(res.ResourceAcls, acl)
|
||||
|
||||
host := "*"
|
||||
if req.Host != nil {
|
||||
host = *req.Host
|
||||
}
|
||||
|
||||
principal := "User:test"
|
||||
if req.Principal != nil {
|
||||
principal = *req.Principal
|
||||
}
|
||||
|
||||
permissionType := req.PermissionType
|
||||
if permissionType == AclPermissionAny {
|
||||
permissionType = AclPermissionAllow
|
||||
}
|
||||
|
||||
acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal})
|
||||
res.ResourceAcls = append(res.ResourceAcls, acl)
|
||||
res.Version = int16(req.Version)
|
||||
return res
|
||||
}
|
||||
|
||||
type MockSaslAuthenticateResponse struct {
|
||||
t TestReporter
|
||||
kerror KError
|
||||
saslAuthBytes []byte
|
||||
}
|
||||
|
||||
func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse {
|
||||
    return &MockSaslAuthenticateResponse{t: t}
}

func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader {
    res := &SaslAuthenticateResponse{}
    res.Err = msar.kerror
    res.SaslAuthBytes = msar.saslAuthBytes
    return res
}

func (msar *MockSaslAuthenticateResponse) SetError(kerror KError) *MockSaslAuthenticateResponse {
    msar.kerror = kerror
    return msar
}

func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *MockSaslAuthenticateResponse {
    msar.saslAuthBytes = saslAuthBytes
    return msar
}

type MockDeleteAclsResponse struct {
    t TestReporter
}

type MockSaslHandshakeResponse struct {
    enabledMechanisms []string
    kerror            KError
    t                 TestReporter
}

func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse {
    return &MockSaslHandshakeResponse{t: t}
}

func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader {
    res := &SaslHandshakeResponse{}
    res.Err = mshr.kerror
    res.EnabledMechanisms = mshr.enabledMechanisms
    return res
}

func (mshr *MockSaslHandshakeResponse) SetError(kerror KError) *MockSaslHandshakeResponse {
    mshr.kerror = kerror
    return mshr
}

func (mshr *MockSaslHandshakeResponse) SetEnabledMechanisms(enabledMechanisms []string) *MockSaslHandshakeResponse {
    mshr.enabledMechanisms = enabledMechanisms
    return mshr
}

func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse {
    return &MockDeleteAclsResponse{t: t}
}

func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoder {
func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
    req := reqBody.(*DeleteAclsRequest)
    res := &DeleteAclsResponse{}

@@ -723,5 +1051,71 @@ func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoder {
        response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError})
        res.FilterResponses = append(res.FilterResponses, response)
    }
    res.Version = int16(req.Version)
    return res
}

type MockDeleteGroupsResponse struct {
    deletedGroups []string
}

func NewMockDeleteGroupsRequest(t TestReporter) *MockDeleteGroupsResponse {
    return &MockDeleteGroupsResponse{}
}

func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDeleteGroupsResponse {
    m.deletedGroups = groups
    return m
}

func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
    resp := &DeleteGroupsResponse{
        GroupErrorCodes: map[string]KError{},
    }
    for _, group := range m.deletedGroups {
        resp.GroupErrorCodes[group] = ErrNoError
    }
    return resp
}

type MockDescribeLogDirsResponse struct {
    t       TestReporter
    logDirs []DescribeLogDirsResponseDirMetadata
}

func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse {
    return &MockDescribeLogDirsResponse{t: t}
}

func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartitions map[string]int) *MockDescribeLogDirsResponse {
    var topics []DescribeLogDirsResponseTopic
    for topic := range topicPartitions {
        var partitions []DescribeLogDirsResponsePartition
        for i := 0; i < topicPartitions[topic]; i++ {
            partitions = append(partitions, DescribeLogDirsResponsePartition{
                PartitionID: int32(i),
                IsTemporary: false,
                OffsetLag:   int64(0),
                Size:        int64(1234),
            })
        }
        topics = append(topics, DescribeLogDirsResponseTopic{
            Topic:      topic,
            Partitions: partitions,
        })
    }
    logDir := DescribeLogDirsResponseDirMetadata{
        ErrorCode: ErrNoError,
        Path:      logDirPath,
        Topics:    topics,
    }
    m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir}
    return m
}

func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader {
    resp := &DescribeLogDirsResponse{
        LogDirs: m.logDirs,
    }
    return resp
}
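The mock response builders above are designed to be wired into the in-process MockBroker from test code; a minimal sketch of that wiring, assuming a *testing.T and purely illustrative mechanism, path, and topic names (none of these values come from this diff):

package sarama_test

import (
    "testing"

    "github.com/Shopify/sarama"
)

func TestMockBrokerWiring(t *testing.T) {
    // Start an in-process fake broker and script its responses.
    mockBroker := sarama.NewMockBroker(t, 1)
    defer mockBroker.Close()

    mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{
        // Advertise PLAIN and accept any SASL authenticate payload.
        "SaslHandshakeRequest":    sarama.NewMockSaslHandshakeResponse(t).SetEnabledMechanisms([]string{"PLAIN"}),
        "SaslAuthenticateRequest": sarama.NewMockSaslAuthenticateResponse(t).SetAuthBytes([]byte("ok")),
        // Report one log dir holding two partitions of "my-topic".
        "DescribeLogDirsRequest": sarama.NewMockDescribeLogDirsResponse(t).SetLogDirs("/var/lib/kafka", map[string]int{"my-topic": 2}),
    })

    // A client pointed at mockBroker.Addr() would now see the scripted responses.
    _ = mockBroker.Addr()
}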
@@ -52,12 +52,14 @@ type OffsetCommitRequest struct {
    // - 0 (kafka 0.8.1 and later)
    // - 1 (kafka 0.8.2 and later)
    // - 2 (kafka 0.9.0 and later)
    // - 3 (kafka 0.11.0 and later)
    // - 4 (kafka 2.0.0 and later)
    Version int16
    blocks  map[string]map[int32]*offsetCommitRequestBlock
}

func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
    if r.Version < 0 || r.Version > 2 {
    if r.Version < 0 || r.Version > 4 {
        return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
    }

@@ -168,12 +170,20 @@ func (r *OffsetCommitRequest) version() int16 {
    return r.Version
}

func (r *OffsetCommitRequest) headerVersion() int16 {
    return 1
}

func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
    switch r.Version {
    case 1:
        return V0_8_2_0
    case 2:
        return V0_9_0_0
    case 3:
        return V0_11_0_0
    case 4:
        return V2_0_0_0
    default:
        return MinVersion
    }

@@ -194,11 +204,11 @@ func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset i
func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) {
    partitions := r.blocks[topic]
    if partitions == nil {
        return 0, "", errors.New("No such offset")
        return 0, "", errors.New("no such offset")
    }
    block := partitions[partitionID]
    if block == nil {
        return 0, "", errors.New("No such offset")
        return 0, "", errors.New("no such offset")
    }
    return block.offset, block.metadata, nil
}
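Whether the client actually uses the new request versions is driven by the configured cluster version; a hedged sketch of the caller-side knob, with an example broker version that is not taken from this diff:

config := sarama.NewConfig()
// Declaring the cluster as Kafka 2.0.0 lets the client select newer protocol
// versions, such as OffsetCommitRequest v4 per the requiredVersion mapping above.
config.Version = sarama.V2_0_0_0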
@@ -1,7 +1,9 @@
package sarama

type OffsetCommitResponse struct {
    Errors map[string]map[int32]KError
    Version        int16
    ThrottleTimeMs int32
    Errors         map[string]map[int32]KError
}

func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {

@@ -17,6 +19,9 @@ func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KE
}

func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
    if r.Version >= 3 {
        pe.putInt32(r.ThrottleTimeMs)
    }
    if err := pe.putArrayLength(len(r.Errors)); err != nil {
        return err
    }

@@ -36,6 +41,15 @@ func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
}

func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
    r.Version = version

    if version >= 3 {
        r.ThrottleTimeMs, err = pd.getInt32()
        if err != nil {
            return err
        }
    }

    numTopics, err := pd.getArrayLength()
    if err != nil || numTopics == 0 {
        return err

@@ -77,9 +91,24 @@ func (r *OffsetCommitResponse) key() int16 {
}

func (r *OffsetCommitResponse) version() int16 {
    return r.Version
}

func (r *OffsetCommitResponse) headerVersion() int16 {
    return 0
}

func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
    return MinVersion
    switch r.Version {
    case 1:
        return V0_8_2_0
    case 2:
        return V0_9_0_0
    case 3:
        return V0_11_0_0
    case 4:
        return V2_0_0_0
    default:
        return MinVersion
    }
}
@@ -1,28 +1,33 @@
package sarama

type OffsetFetchRequest struct {
    ConsumerGroup string
    Version       int16
    ConsumerGroup string
    partitions    map[string][]int32
}

func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
    if r.Version < 0 || r.Version > 1 {
    if r.Version < 0 || r.Version > 5 {
        return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
    }

    if err = pe.putString(r.ConsumerGroup); err != nil {
        return err
    }
    if err = pe.putArrayLength(len(r.partitions)); err != nil {
        return err
    }
    for topic, partitions := range r.partitions {
        if err = pe.putString(topic); err != nil {

    if r.Version >= 2 && r.partitions == nil {
        pe.putInt32(-1)
    } else {
        if err = pe.putArrayLength(len(r.partitions)); err != nil {
            return err
        }
        if err = pe.putInt32Array(partitions); err != nil {
            return err
        for topic, partitions := range r.partitions {
            if err = pe.putString(topic); err != nil {
                return err
            }
            if err = pe.putInt32Array(partitions); err != nil {
                return err
            }
        }
    }
    return nil

@@ -37,7 +42,7 @@ func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error)
    if err != nil {
        return err
    }
    if partitionCount == 0 {
    if (partitionCount == 0 && version < 2) || partitionCount < 0 {
        return nil
    }
    r.partitions = make(map[string][]int32)

@@ -63,15 +68,33 @@ func (r *OffsetFetchRequest) version() int16 {
    return r.Version
}

func (r *OffsetFetchRequest) headerVersion() int16 {
    return 1
}

func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
    switch r.Version {
    case 1:
        return V0_8_2_0
    case 2:
        return V0_10_2_0
    case 3:
        return V0_11_0_0
    case 4:
        return V2_0_0_0
    case 5:
        return V2_1_0_0
    default:
        return MinVersion
    }
}

func (r *OffsetFetchRequest) ZeroPartitions() {
    if r.partitions == nil && r.Version >= 2 {
        r.partitions = make(map[string][]int32)
    }
}

func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
    if r.partitions == nil {
        r.partitions = make(map[string][]int32)
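As the encode path above shows, from protocol v2 onward a nil partition set is written as -1, which the broker treats as "all partitions for this group", while ZeroPartitions forces an explicitly empty list instead. A brief sketch under that reading; the group and topic names are illustrative:

// With a nil partition set at v2+, encode() writes -1 and the broker
// returns committed offsets for every partition of the group.
req := &sarama.OffsetFetchRequest{Version: 2, ConsumerGroup: "example-group"}

// To send an explicitly empty partition list instead of "all":
// req.ZeroPartitions()

// Or name individual partitions of interest:
req.AddPartition("my-topic", 0)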
@@ -1,17 +1,25 @@
package sarama

type OffsetFetchResponseBlock struct {
    Offset   int64
    Metadata string
    Err      KError
    Offset      int64
    LeaderEpoch int32
    Metadata    string
    Err         KError
}

func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
    b.Offset, err = pd.getInt64()
    if err != nil {
        return err
    }

    if version >= 5 {
        b.LeaderEpoch, err = pd.getInt32()
        if err != nil {
            return err
        }
    }

    b.Metadata, err = pd.getString()
    if err != nil {
        return err

@@ -26,9 +34,13 @@ func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
    return nil
}

func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
    pe.putInt64(b.Offset)

    if version >= 5 {
        pe.putInt32(b.LeaderEpoch)
    }

    err = pe.putString(b.Metadata)
    if err != nil {
        return err

@@ -40,10 +52,17 @@ func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
}

type OffsetFetchResponse struct {
    Blocks map[string]map[int32]*OffsetFetchResponseBlock
    Version        int16
    ThrottleTimeMs int32
    Blocks         map[string]map[int32]*OffsetFetchResponseBlock
    Err            KError
}

func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
    if r.Version >= 3 {
        pe.putInt32(r.ThrottleTimeMs)
    }

    if err := pe.putArrayLength(len(r.Blocks)); err != nil {
        return err
    }

@@ -56,53 +75,75 @@ func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
    }
    for partition, block := range partitions {
        pe.putInt32(partition)
        if err := block.encode(pe); err != nil {
        if err := block.encode(pe, r.Version); err != nil {
            return err
        }
    }
    }
    if r.Version >= 2 {
        pe.putInt16(int16(r.Err))
    }
    return nil
}

func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
    r.Version = version

    if version >= 3 {
        r.ThrottleTimeMs, err = pd.getInt32()
        if err != nil {
            return err
        }
    }

    numTopics, err := pd.getArrayLength()
    if err != nil || numTopics == 0 {
    if err != nil {
        return err
    }

    r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
    for i := 0; i < numTopics; i++ {
        name, err := pd.getString()
        if err != nil {
            return err
        }

        numBlocks, err := pd.getArrayLength()
        if err != nil {
            return err
        }

        if numBlocks == 0 {
            r.Blocks[name] = nil
            continue
        }
        r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)

        for j := 0; j < numBlocks; j++ {
            id, err := pd.getInt32()
    if numTopics > 0 {
        r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
        for i := 0; i < numTopics; i++ {
            name, err := pd.getString()
            if err != nil {
                return err
            }

            block := new(OffsetFetchResponseBlock)
            err = block.decode(pd)
            numBlocks, err := pd.getArrayLength()
            if err != nil {
                return err
            }
            r.Blocks[name][id] = block

            if numBlocks == 0 {
                r.Blocks[name] = nil
                continue
            }
            r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)

            for j := 0; j < numBlocks; j++ {
                id, err := pd.getInt32()
                if err != nil {
                    return err
                }

                block := new(OffsetFetchResponseBlock)
                err = block.decode(pd, version)
                if err != nil {
                    return err
                }
                r.Blocks[name][id] = block
            }
        }
    }

    if version >= 2 {
        kerr, err := pd.getInt16()
        if err != nil {
            return err
        }
        r.Err = KError(kerr)
    }

    return nil
}

@@ -111,11 +152,28 @@ func (r *OffsetFetchResponse) key() int16 {
}

func (r *OffsetFetchResponse) version() int16 {
    return r.Version
}

func (r *OffsetFetchResponse) headerVersion() int16 {
    return 0
}

func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
    return MinVersion
    switch r.Version {
    case 1:
        return V0_8_2_0
    case 2:
        return V0_10_2_0
    case 3:
        return V0_11_0_0
    case 4:
        return V2_0_0_0
    case 5:
        return V2_1_0_0
    default:
        return MinVersion
    }
}

func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
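On the consuming side, callers typically reach the decoded fields above through GetBlock; a brief sketch, assuming resp is an *OffsetFetchResponse already obtained from a broker (for example via Broker.FetchOffset) and an illustrative topic name:

if block := resp.GetBlock("my-topic", 0); block != nil && block.Err == sarama.ErrNoError {
    fmt.Printf("committed offset %d (leader epoch %d, metadata %q)\n",
        block.Offset, block.LeaderEpoch, block.Metadata)
}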
@@ -19,6 +19,10 @@ type OffsetManager interface {
    // will otherwise leak memory. You must call this after all the
    // PartitionOffsetManagers are closed.
    Close() error

    // Commit commits the offsets. This method can be used if AutoCommit.Enable is
    // set to false.
    Commit()
}

type offsetManager struct {

@@ -58,7 +62,6 @@ func newOffsetManagerFromClient(group, memberID string, generation int32, client
        client: client,
        conf:   conf,
        group:  group,
        ticker: time.NewTicker(conf.Consumer.Offsets.CommitInterval),
        poms:   make(map[string]map[int32]*partitionOffsetManager),

        memberID: memberID,

@@ -67,7 +70,10 @@ func newOffsetManagerFromClient(group, memberID string, generation int32, client
        closing: make(chan none),
        closed:  make(chan none),
    }
    go withRecover(om.mainLoop)
    if conf.Consumer.Offsets.AutoCommit.Enable {
        om.ticker = time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval)
        go withRecover(om.mainLoop)
    }

    return om, nil
}

@@ -99,7 +105,9 @@ func (om *offsetManager) Close() error {
    om.closeOnce.Do(func() {
        // exit the mainLoop
        close(om.closing)
        <-om.closed
        if om.conf.Consumer.Offsets.AutoCommit.Enable {
            <-om.closed
        }

        // mark all POMs as closed
        om.asyncClosePOMs()

@@ -120,6 +128,14 @@ func (om *offsetManager) Close() error {
    return nil
}

func (om *offsetManager) computeBackoff(retries int) time.Duration {
    if om.conf.Metadata.Retry.BackoffFunc != nil {
        return om.conf.Metadata.Retry.BackoffFunc(retries, om.conf.Metadata.Retry.Max)
    } else {
        return om.conf.Metadata.Retry.Backoff
    }
}

func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) {
    broker, err := om.coordinator()
    if err != nil {

@@ -161,10 +177,11 @@ func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retri
        if retries <= 0 {
            return 0, "", block.Err
        }
        backoff := om.computeBackoff(retries)
        select {
        case <-om.closing:
            return 0, "", block.Err
        case <-time.After(om.conf.Metadata.Retry.Backoff):
        case <-time.After(backoff):
        }
        return om.fetchInitialOffset(topic, partition, retries-1)
    default:

@@ -216,14 +233,18 @@ func (om *offsetManager) mainLoop() {
    for {
        select {
        case <-om.ticker.C:
            om.flushToBroker()
            om.releasePOMs(false)
            om.Commit()
        case <-om.closing:
            return
        }
    }
}

func (om *offsetManager) Commit() {
    om.flushToBroker()
    om.releasePOMs(false)
}

func (om *offsetManager) flushToBroker() {
    req := om.constructRequest()
    if req == nil {

@@ -266,7 +287,6 @@ func (om *offsetManager) constructRequest() *OffsetCommitRequest {
            ConsumerID:              om.memberID,
            ConsumerGroupGeneration: om.generation,
        }

    }

    om.pomsLock.RLock()

@@ -324,7 +344,6 @@ func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest
            pom.handleError(err)
        case ErrOffsetsLoadInProgress:
            // nothing wrong but we didn't commit, we'll get it next time round
            break
        case ErrUnknownTopicOrPartition:
            // let the user know *and* try redispatching - if topic-auto-create is
            // enabled, redispatching should trigger a metadata req and create the

@@ -567,6 +586,6 @@ func (pom *partitionOffsetManager) handleError(err error) {

func (pom *partitionOffsetManager) release() {
    pom.releaseOnce.Do(func() {
        go close(pom.errors)
        close(pom.errors)
    })
}
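The AutoCommit gating and computeBackoff hooks above are driven purely by configuration. A hedged, self-contained sketch of the two knobs involved; the broker address, group, topic, offset, and backoff curve are illustrative values, not taken from this diff:

package main

import (
    "log"
    "time"

    "github.com/Shopify/sarama"
)

func main() {
    config := sarama.NewConfig()
    config.Version = sarama.V2_0_0_0

    // Disable the background ticker; offsets are then only sent when the
    // application calls Commit itself.
    config.Consumer.Offsets.AutoCommit.Enable = false

    // Optional custom retry backoff, consumed by computeBackoff above.
    config.Metadata.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
        return time.Duration(retries+1) * 250 * time.Millisecond
    }

    client, err := sarama.NewClient([]string{"localhost:9092"}, config)
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    om, err := sarama.NewOffsetManagerFromClient("example-group", client)
    if err != nil {
        log.Fatal(err)
    }
    defer om.Close()

    pom, err := om.ManagePartition("my-topic", 0)
    if err != nil {
        log.Fatal(err)
    }
    defer pom.Close()

    pom.MarkOffset(42, "") // stage an offset in memory...
    om.Commit()            // ...and flush it to the group coordinator now
}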
@@ -6,7 +6,7 @@ type offsetRequestBlock struct {
}

func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
    pe.putInt64(int64(b.time))
    pe.putInt64(b.time)
    if version == 0 {
        pe.putInt32(b.maxOffsets)
    }

@@ -27,12 +27,20 @@ func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error)
}

type OffsetRequest struct {
    Version int16
    blocks  map[string]map[int32]*offsetRequestBlock
    Version        int16
    replicaID      int32
    isReplicaIDSet bool
    blocks         map[string]map[int32]*offsetRequestBlock
}

func (r *OffsetRequest) encode(pe packetEncoder) error {
    pe.putInt32(-1) // replica ID is always -1 for clients
    if r.isReplicaIDSet {
        pe.putInt32(r.replicaID)
    } else {
        // default replica ID is always -1 for clients
        pe.putInt32(-1)
    }

    err := pe.putArrayLength(len(r.blocks))
    if err != nil {
        return err

@@ -59,10 +67,14 @@ func (r *OffsetRequest) encode(pe packetEncoder) error {
func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
    r.Version = version

    // Ignore replica ID
    if _, err := pd.getInt32(); err != nil {
    replicaID, err := pd.getInt32()
    if err != nil {
        return err
    }
    if replicaID >= 0 {
        r.SetReplicaID(replicaID)
    }

    blockCount, err := pd.getArrayLength()
    if err != nil {
        return err

@@ -104,6 +116,10 @@ func (r *OffsetRequest) version() int16 {
    return r.Version
}

func (r *OffsetRequest) headerVersion() int16 {
    return 1
}

func (r *OffsetRequest) requiredVersion() KafkaVersion {
    switch r.Version {
    case 1:

@@ -113,6 +129,18 @@ func (r *OffsetRequest) requiredVersion() KafkaVersion {
    }
}

func (r *OffsetRequest) SetReplicaID(id int32) {
    r.replicaID = id
    r.isReplicaIDSet = true
}

func (r *OffsetRequest) ReplicaID() int32 {
    if r.isReplicaIDSet {
        return r.replicaID
    }
    return -1
}

func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
    if r.blocks == nil {
        r.blocks = make(map[string]map[int32]*offsetRequestBlock)
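SetReplicaID above is mainly useful for tooling that deliberately impersonates a follower broker; ordinary clients can leave it unset and rely on the -1 default that encode() writes. A short sketch using the exported helpers, with an illustrative topic and broker ID:

req := &sarama.OffsetRequest{Version: 1}
// List the newest available offset for partition 0 of "my-topic".
req.AddBlock("my-topic", 0, sarama.OffsetNewest, 1)

// Only set a replica ID when acting as broker 2's follower; normal clients
// omit this call and the request is encoded with replica ID -1.
req.SetReplicaID(2)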