winlin

add producer API messages.

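This commit wires a producer send path through the existing Kafka message cache and adds the ProduceRequest structures to the protocol layer. As an overview, a sketch of the intended call chain and on-wire nesting, using only the names introduced in the diff below (SrsKafkaClient::write_messages() itself is still a stub in this commit):

// sketch of the new producer path added by this commit:
//
//   SrsKafkaCache::flush(partition, key, pc)
//     -> partition->connect()                      // connect to the partition leader
//     -> partition->flush(pc)                      // new SrsKafkaPartition::flush()
//        -> kafka->write_messages(topic, id, *pc)  // new SrsKafkaClient API (stub here)
//
// on the wire, a produce request nests as:
//   SrsKafkaProducerRequest
//     -> SrsKafkaArray<SrsKafkaProducerTopicMessages>
//       -> SrsKafkaArray<SrsKafkaProducerPartitionMessages>
//         -> SrsKafkaRawMessageSet
//           -> SrsKafkaRawMessage (offset, message_size, crc, magic_byte, attributes, key, value)
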
@@ -102,7 +102,7 @@ std::string srs_kafka_summary_partitions(const vector<SrsKafkaPartition*>& parti
     return srs_join_vector_string(ret, ", ");
 }
 
-void srs_kafka_metadata2connector(SrsKafkaTopicMetadataResponse* metadata, vector<SrsKafkaPartition*>& partitions)
+void srs_kafka_metadata2connector(string topic_name, SrsKafkaTopicMetadataResponse* metadata, vector<SrsKafkaPartition*>& partitions)
 {
     for (int i = 0; i < metadata->metadatas.size(); i++) {
         SrsKafkaTopicMetadata* topic = metadata->metadatas.at(i);
@@ -111,6 +111,8 @@ void srs_kafka_metadata2connector(SrsKafkaTopicMetadataResponse* metadata, vecto
             SrsKafkaPartitionMetadata* partition = topic->metadatas.at(j);
             
             SrsKafkaPartition* p = new SrsKafkaPartition();
+            
+            p->topic = topic_name;
             p->id = partition->partition_id;
             p->broker = partition->leader;
             
@@ -171,6 +173,11 @@ int SrsKafkaPartition::connect()
     return ret;
 }
 
+int SrsKafkaPartition::flush(SrsKafkaPartitionCache* pc)
+{
+    return kafka->write_messages(topic, id, *pc);
+}
+
 SrsKafkaMessage::SrsKafkaMessage(int k)
 {
     key = k;
@@ -290,11 +297,15 @@ int SrsKafkaCache::flush(SrsKafkaPartition* partition, int key, SrsKafkaPartitio
     
     // connect transport.
     if ((ret = partition->connect()) != ERROR_SUCCESS) {
-        srs_error("connect to partition failed. ret=%d", ret);
+        srs_error("kafka connect to partition failed. ret=%d", ret);
         return ret;
     }
     
-    // TODO: FIXME: implements it.
+    // write the json objects.
+    if ((ret = partition->flush(pc)) != ERROR_SUCCESS) {
+        srs_error("kafka write messages failed. ret=%d", ret);
+        return ret;
+    }
     
     // free all wrote messages.
     for (vector<SrsJsonObject*>::iterator it = pc->begin(); it != pc->end(); ++it) {
@@ -546,7 +557,7 @@ int SrsKafkaProducer::request_metadata()
     srs_trace("kafka metadata: %s", summary.c_str());
     
     // generate the partition info.
-    srs_kafka_metadata2connector(metadata, partitions);
+    srs_kafka_metadata2connector(topic, metadata, partitions);
     srs_trace("kafka connector: %s", srs_kafka_summary_partitions(partitions).c_str());
     
     // update the total partition for cache.
@@ -46,6 +46,11 @@ class SrsKafkaProducer;
 #ifdef SRS_AUTO_KAFKA
 
 /**
+ * the partition messages cache.
+ */
+typedef std::vector<SrsJsonObject*> SrsKafkaPartitionCache;
+
+/**
  * the kafka partition info.
  */
 struct SrsKafkaPartition
@@ -56,6 +61,7 @@ private:
     SrsKafkaClient* kafka;
 public:
     int id;
+    std::string topic;
     // leader.
     int broker;
     std::string host;
@@ -66,6 +72,7 @@ public:
 public:
     virtual std::string hostport();
     virtual int connect();
+    virtual int flush(SrsKafkaPartitionCache* pc);
 };
 
 /**
@@ -95,11 +102,6 @@ public:
 };
 
 /**
- * the partition messages cache.
- */
-typedef std::vector<SrsJsonObject*> SrsKafkaPartitionCache;
-
-/**
  * a message cache for kafka.
  */
 class SrsKafkaCache
@@ -274,6 +274,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #define ERROR_KAFKA_CODEC_RESPONSE 4033
 #define ERROR_KAFKA_CODEC_ARRAY 4034
 #define ERROR_KAFKA_CODEC_METADATA 4035
+#define ERROR_KAFKA_CODEC_MESSAGE 4036
+#define ERROR_KAFKA_CODEC_PRODUCER 4037
 
 ///////////////////////////////////////////////////////
 // HTTP API error.
@@ -33,6 +33,7 @@ using namespace std;
 #include <srs_protocol_stream.hpp>
 #include <srs_kernel_utility.hpp>
 #include <srs_protocol_utility.hpp>
+#include <srs_protocol_json.hpp>
 
 #ifdef SRS_AUTO_KAFKA
 
@@ -478,6 +479,67 @@ SrsKafkaRawMessage::~SrsKafkaRawMessage()
     srs_freep(value);
 }
 
+int SrsKafkaRawMessage::nb_bytes()
+{
+    return 8 + 4 + 4 + 1 + 1 + key->nb_bytes() + value->nb_bytes();
+}
+
+int SrsKafkaRawMessage::encode(SrsBuffer* buf)
+{
+    int ret = ERROR_SUCCESS;
+    
+    if (!buf->require(8 + 4 + 4 + 1 + 1)) {
+        ret = ERROR_KAFKA_CODEC_MESSAGE;
+        srs_error("kafka encode message failed. ret=%d", ret);
+        return ret;
+    }
+    buf->write_8bytes(offset);
+    buf->write_4bytes(message_size);
+    buf->write_4bytes(crc);
+    buf->write_1bytes(magic_byte);
+    buf->write_1bytes(attributes);
+    
+    if ((ret = key->encode(buf)) != ERROR_SUCCESS) {
+        srs_error("kafka encode message key failed. ret=%d", ret);
+        return ret;
+    }
+    
+    if ((ret = value->encode(buf)) != ERROR_SUCCESS) {
+        srs_error("kafka encode message value failed. ret=%d", ret);
+        return ret;
+    }
+    
+    return ret;
+}
+
+int SrsKafkaRawMessage::decode(SrsBuffer* buf)
+{
+    int ret = ERROR_SUCCESS;
+    
+    if (!buf->require(8 + 4 + 4 + 1 + 1)) {
+        ret = ERROR_KAFKA_CODEC_MESSAGE;
+        srs_error("kafka decode message failed. ret=%d", ret);
+        return ret;
+    }
+    offset = buf->read_8bytes();
+    message_size = buf->read_4bytes();
+    crc = buf->read_4bytes();
+    magic_byte = buf->read_1bytes();
+    attributes = buf->read_1bytes();
+    
+    if ((ret = key->decode(buf)) != ERROR_SUCCESS) {
+        srs_error("kafka decode message key failed. ret=%d", ret);
+        return ret;
+    }
+    
+    if ((ret = value->decode(buf)) != ERROR_SUCCESS) {
+        srs_error("kafka decode message value failed. ret=%d", ret);
+        return ret;
+    }
+    
+    return ret;
+}
+
 SrsKafkaRawMessageSet::SrsKafkaRawMessageSet()
 {
 }
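The 8 + 4 + 4 + 1 + 1 in nb_bytes() above is the fixed per-message header that encode() writes before the key and value. A small standalone illustration of that layout (plain C++, independent of SrsBuffer; the field names mirror the members above):

#include <stdio.h>

int main()
{
    // fixed header of a kafka message, in the order encode() writes it:
    int fixed = 0;
    fixed += 8; // offset       (int64)
    fixed += 4; // message_size (int32)
    fixed += 4; // crc          (int32)
    fixed += 1; // magic_byte   (int8)
    fixed += 1; // attributes   (int8)
    
    // total on-wire size = fixed + key->nb_bytes() + value->nb_bytes()
    printf("fixed header: %d bytes\n", fixed); // prints 18
    return 0;
}
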
@@ -492,6 +554,59 @@ SrsKafkaRawMessageSet::~SrsKafkaRawMessageSet()
     messages.clear();
 }
 
+void SrsKafkaRawMessageSet::append(SrsKafkaRawMessage* msg)
+{
+    messages.push_back(msg);
+}
+
+int SrsKafkaRawMessageSet::nb_bytes()
+{
+    int s = 0;
+    
+    vector<SrsKafkaRawMessage*>::iterator it;
+    for (it = messages.begin(); it != messages.end(); ++it) {
+        SrsKafkaRawMessage* message = *it;
+        s += message->nb_bytes();
+    }
+    
+    return s;
+}
+
+int SrsKafkaRawMessageSet::encode(SrsBuffer* buf)
+{
+    int ret = ERROR_SUCCESS;
+    
+    vector<SrsKafkaRawMessage*>::iterator it;
+    for (it = messages.begin(); it != messages.end(); ++it) {
+        SrsKafkaRawMessage* message = *it;
+        if ((ret = message->encode(buf)) != ERROR_SUCCESS) {
+            srs_error("kafka encode message set failed. ret=%d", ret);
+            return ret;
+        }
+    }
+    
+    return ret;
+}
+
+int SrsKafkaRawMessageSet::decode(SrsBuffer* buf)
+{
+    int ret = ERROR_SUCCESS;
+    
+    while (!buf->empty()) {
+        SrsKafkaRawMessage* message = new SrsKafkaRawMessage();
+        
+        if ((ret = message->decode(buf)) != ERROR_SUCCESS) {
+            srs_freep(message);
+            srs_error("kafka decode message set elem failed. ret=%d", ret);
+            return ret;
+        }
+        
+        messages.push_back(message);
+    }
+    
+    return ret;
+}
+
 SrsKafkaRequest::SrsKafkaRequest()
 {
     header.set_correlation_id(SrsKafkaCorrelationPool::instance()->generate_correlation_id());
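Note that SrsKafkaRawMessageSet::decode() above consumes the buffer until it is empty: unlike other Kafka arrays, a message set is not prefixed with an element count, so the caller must hand it a buffer that ends exactly where the set ends (this is what the sub-buffer branch in SrsKafkaProducerPartitionMessages::decode(), further down, takes care of). A minimal sketch of that bounding idea, using hypothetical plain types rather than the SrsBuffer API:

#include <stddef.h>

// hypothetical bounded view: hand the message-set decoder exactly
// message_set_size bytes, so "read until empty" cannot run past the set.
struct msgset_view {
    const char* data;
    size_t size;
};

msgset_view bound_message_set(const char* buf, size_t pos, size_t message_set_size)
{
    msgset_view v = { buf + pos, message_set_size };
    return v;
}
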
@@ -863,6 +978,155 @@ int SrsKafkaTopicMetadataResponse::decode(SrsBuffer* buf)
     return ret;
 }
 
+int SrsKafkaProducerPartitionMessages::nb_bytes()
+{
+    return 4 + 4 + messages.nb_bytes();
+}
+
+int SrsKafkaProducerPartitionMessages::encode(SrsBuffer* buf)
+{
+    int ret = ERROR_SUCCESS;
+    
+    if (!buf->require(4 + 4)) {
+        ret = ERROR_KAFKA_CODEC_PRODUCER;
+        srs_error("kafka encode producer failed. ret=%d", ret);
+        return ret;
+    }
+    buf->write_4bytes(partition);
+    buf->write_4bytes(message_set_size);
+    
+    if ((ret = messages.encode(buf)) != ERROR_SUCCESS) {
+        srs_error("kafka encode producer messages failed. ret=%d", ret);
+        return ret;
+    }
+    
+    return ret;
+}
+
+int SrsKafkaProducerPartitionMessages::decode(SrsBuffer* buf)
+{
+    int ret = ERROR_SUCCESS;
+    
+    if (!buf->require(4 + 4)) {
+        ret = ERROR_KAFKA_CODEC_PRODUCER;
+        srs_error("kafka decode producer failed. ret=%d", ret);
+        return ret;
+    }
+    partition = buf->read_4bytes();
+    message_set_size = buf->read_4bytes();
+    
+    // because the message set is decoded until the buffer is empty, we must use a
+    // new, bounded buffer when other objects follow the message set.
+    if (buf->size() - buf->pos() != message_set_size) {
+        SrsBuffer* tbuf = new SrsBuffer();
+        SrsAutoFree(SrsBuffer, tbuf);
+        
+        if ((ret = tbuf->initialize(buf->data() + buf->pos(), message_set_size)) != ERROR_SUCCESS) {
+            return ret;
+        }
+        
+        if ((ret = messages.decode(tbuf)) != ERROR_SUCCESS) {
+            srs_error("kafka decode producer messages failed. ret=%d", ret);
+            return ret;
+        }
+    } else {
+        if ((ret = messages.decode(buf)) != ERROR_SUCCESS) {
+            srs_error("kafka decode producer messages failed. ret=%d", ret);
+            return ret;
+        }
+    }
+    
+    return ret;
+}
+
+int SrsKafkaProducerTopicMessages::nb_bytes()
+{
+    return topic_name.nb_bytes() + partitions.nb_bytes();
+}
+
+int SrsKafkaProducerTopicMessages::encode(SrsBuffer* buf)
+{
+    int ret = ERROR_SUCCESS;
+    
+    if ((ret = topic_name.encode(buf)) != ERROR_SUCCESS) {
+        return ret;
+    }
+    
+    if ((ret = partitions.encode(buf)) != ERROR_SUCCESS) {
+        return ret;
+    }
+    
+    return ret;
+}
+
+int SrsKafkaProducerTopicMessages::decode(SrsBuffer* buf)
+{
+    int ret = ERROR_SUCCESS;
+    
+    if ((ret = topic_name.decode(buf)) != ERROR_SUCCESS) {
+        return ret;
+    }
+    
+    if ((ret = partitions.decode(buf)) != ERROR_SUCCESS) {
+        return ret;
+    }
+    
+    return ret;
+}
+
+SrsKafkaProducerRequest::SrsKafkaProducerRequest()
+{
+    required_acks = 0;
+    timeout = 0;
+}
+
+SrsKafkaProducerRequest::~SrsKafkaProducerRequest()
+{
+}
+
+int SrsKafkaProducerRequest::nb_bytes()
+{
+    return 2 + 4 + topics.nb_bytes();
+}
+
+int SrsKafkaProducerRequest::encode(SrsBuffer* buf)
+{
+    int ret = ERROR_SUCCESS;
+    
+    if (!buf->require(2 + 4)) {
+        ret = ERROR_KAFKA_CODEC_PRODUCER;
+        srs_error("kafka encode producer failed. ret=%d", ret);
+        return ret;
+    }
+    buf->write_2bytes(required_acks);
+    buf->write_4bytes(timeout);
+    
+    if ((ret = topics.encode(buf)) != ERROR_SUCCESS) {
+        srs_error("kafka encode producer topics failed. ret=%d", ret);
+    }
+    
+    return ret;
+}
+
+int SrsKafkaProducerRequest::decode(SrsBuffer* buf)
+{
+    int ret = ERROR_SUCCESS;
+    
+    if (!buf->require(2 + 4)) {
+        ret = ERROR_KAFKA_CODEC_PRODUCER;
+        srs_error("kafka decode producer failed. ret=%d", ret);
+        return ret;
+    }
+    required_acks = buf->read_2bytes();
+    timeout = buf->read_4bytes();
+    
+    if ((ret = topics.decode(buf)) != ERROR_SUCCESS) {
+        srs_error("kafka decode producer topics failed. ret=%d", ret);
+    }
+    
+    return ret;
+}
+
 SrsKafkaCorrelationPool* SrsKafkaCorrelationPool::_instance = new SrsKafkaCorrelationPool();
 
 SrsKafkaCorrelationPool* SrsKafkaCorrelationPool::instance()
@@ -1085,6 +1349,13 @@ int SrsKafkaClient::fetch_metadata(string topic, SrsKafkaTopicMetadataResponse**
     return ret;
 }
 
+int SrsKafkaClient::write_messages(std::string topic, int32_t partition, vector<SrsJsonObject*>& msgs)
+{
+    int ret = ERROR_SUCCESS;
+    // TODO: FIXME: implements it.
+    return ret;
+}
+
 vector<string> srs_kafka_array2vector(SrsKafkaArray<SrsKafkaString>* arr)
 {
     vector<string> strs;
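write_messages() is only a stub in this commit. A rough sketch, not the project's eventual implementation, of how the types added above could be assembled inside it; serializing each SrsJsonObject into a message value and sending the encoded request over the transport are outside this commit, so those steps stay as TODO comments here:

int SrsKafkaClient::write_messages(std::string topic, int32_t partition, vector<SrsJsonObject*>& msgs)
{
    int ret = ERROR_SUCCESS;
    
    // group all cached json messages under a single partition.
    SrsKafkaProducerPartitionMessages pms;
    pms.partition = partition;
    
    vector<SrsJsonObject*>::iterator it;
    for (it = msgs.begin(); it != msgs.end(); ++it) {
        // TODO: serialize *it into the value of a SrsKafkaRawMessage
        // (fill offset/crc/magic_byte/attributes), then pms.messages.append(msg).
    }
    pms.message_set_size = pms.messages.nb_bytes();
    
    // TODO: wrap pms in a SrsKafkaProducerTopicMessages for `topic`, put that into
    // a SrsKafkaProducerRequest, encode and send it, then read the produce response
    // when required_acks != 0.
    
    return ret;
}
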
@@ -39,6 +39,7 @@
 
 class SrsFastStream;
 class ISrsProtocolReaderWriter;
+class SrsJsonObject;
 
 #ifdef SRS_AUTO_KAFKA
 
@@ -484,7 +485,7 @@ public:
  * the kafka message in message set.
  * @see https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets
  */
-struct SrsKafkaRawMessage
+struct SrsKafkaRawMessage : public ISrsCodec
 {
 // metadata.
 public:
@@ -530,19 +531,32 @@ public:
 public:
     SrsKafkaRawMessage();
     virtual ~SrsKafkaRawMessage();
+// interface ISrsCodec
+public:
+    virtual int nb_bytes();
+    virtual int encode(SrsBuffer* buf);
+    virtual int decode(SrsBuffer* buf);
 };
 
 /**
  * a set of kafka message.
  * @see https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets
+ * @remark because the message set is not preceded by an int32 count, we decode the buffer until it is empty.
  */
-class SrsKafkaRawMessageSet
+class SrsKafkaRawMessageSet : public ISrsCodec
 {
 private:
     std::vector<SrsKafkaRawMessage*> messages;
 public:
     SrsKafkaRawMessageSet();
     virtual ~SrsKafkaRawMessageSet();
+public:
+    virtual void append(SrsKafkaRawMessage* msg);
+// interface ISrsCodec
+public:
+    virtual int nb_bytes();
+    virtual int encode(SrsBuffer* buf);
+    virtual int decode(SrsBuffer* buf);
 };
 
 /**
@@ -704,6 +718,94 @@ public:
     virtual int decode(SrsBuffer* buf);
 };
 
+
+/**
+ * the messages for the producer to send.
+ * @see https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ProduceRequest
+ */
+struct SrsKafkaProducerPartitionMessages : public ISrsCodec
+{
+public:
+    /**
+     * The partition that data is being published to.
+     */
+    int32_t partition;
+    /**
+     * The size, in bytes, of the message set that follows.
+     */
+    int32_t message_set_size;
+    /**
+     * messages in set.
+     */
+    SrsKafkaRawMessageSet messages;
+// interface ISrsCodec
+public:
+    virtual int nb_bytes();
+    virtual int encode(SrsBuffer* buf);
+    virtual int decode(SrsBuffer* buf);
+};
+struct SrsKafkaProducerTopicMessages : public ISrsCodec
+{
+public:
+    /**
+     * The topic that data is being published to.
+     */
+    SrsKafkaString topic_name;
+    /**
+     * messages of partitions.
+     */
+    SrsKafkaArray<SrsKafkaProducerPartitionMessages> partitions;
+// interface ISrsCodec
+public:
+    virtual int nb_bytes();
+    virtual int encode(SrsBuffer* buf);
+    virtual int decode(SrsBuffer* buf);
+};
+
+/**
+ * the request for the producer to send messages.
+ * @see https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ProduceRequest
+ */
+class SrsKafkaProducerRequest : public SrsKafkaRequest
+{
+private:
+    /**
+     * This field indicates how many acknowledgements the servers should receive
+     * before responding to the request. If it is 0 the server will not send any
+     * response (this is the only case where the server will not reply to a request).
+     * If it is 1, the server will wait until the data is written to the local log
+     * before sending a response. If it is -1 the server will block until the
+     * message is committed by all in sync replicas before sending a response.
+     * For any number > 1 the server will block waiting for this number of
+     * acknowledgements to occur (but the server will never wait for more
+     * acknowledgements than there are in-sync replicas).
+     */
+    int16_t required_acks;
+    /**
+     * This provides a maximum time in milliseconds the server can await the receipt
+     * of the number of acknowledgements in RequiredAcks. The timeout is not an exact
+     * limit on the request time for a few reasons: (1) it does not include network
+     * latency, (2) the timer begins at the beginning of the processing of this request
+     * so if many requests are queued due to server overload that wait time will not
+     * be included, (3) we will not terminate a local write so if the local write
+     * time exceeds this timeout it will not be respected. To get a hard timeout of
+     * this type the client should use the socket timeout.
+     */
+    int32_t timeout;
+    /**
+     * messages of topics.
+     */
+    SrsKafkaArray<SrsKafkaProducerTopicMessages> topics;
+public:
+    SrsKafkaProducerRequest();
+    virtual ~SrsKafkaProducerRequest();
+// interface ISrsCodec
+public:
+    virtual int nb_bytes();
+    virtual int encode(SrsBuffer* buf);
+    virtual int decode(SrsBuffer* buf);
+};
+
 /**
  * the poll to discovery reponse.
  * @param CorrelationId This is a user-supplied integer. It will be passed back
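The required_acks and timeout semantics documented above follow the Kafka protocol guide; note that the constructor added in this commit initializes both fields to 0, i.e. fire-and-forget with no broker-side wait. For orientation, commonly used values look like this (illustrative constants, not ones defined by SRS):

#include <stdint.h>

// illustrative values only; SRS does not define these constants.
const int16_t EG_ACKS_NONE      = 0;    // broker sends no response at all.
const int16_t EG_ACKS_LEADER    = 1;    // ack once the leader's local write completes.
const int16_t EG_ACKS_ALL_ISR   = -1;   // ack once all in-sync replicas have the message.
const int32_t EG_ACK_TIMEOUT_MS = 3000; // broker-side wait for the requested acks, in ms.
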
@@ -813,6 +915,10 @@ public:
      * fetch the metadata from broker for topic.
      */
     virtual int fetch_metadata(std::string topic, SrsKafkaTopicMetadataResponse** pmsg);
+    /**
+     * write the messages to the partition of the topic.
+     */
+    virtual int write_messages(std::string topic, int32_t partition, std::vector<SrsJsonObject*>& msgs);
 };
 
 // convert kafka array[string] to vector[string]