Mirror of https://github.com/ossrs/srs.git (synced 2025-03-09 15:49:59 +00:00)

Commit 5d17bb8bb0 (parent bb3e8a41d1): Remove KAFKA. 3.0.53

18 changed files with 7 additions and 3358 deletions
@@ -1555,7 +1555,6 @@ srs_error_t SrsConfig::reload_conf(SrsConfig* conf)
     }
     
     // TODO: FIXME: support reload stream_caster.
-    // TODO: FIXME: support reload kafka.
     
     // merge config: vhost
     if ((err = reload_vhost(old_root)) != srs_success) {
@@ -2139,19 +2138,6 @@ srs_error_t SrsConfig::global_to_json(SrsJsonObject* obj)
             }
         }
         obj->set(dir->name, sobj);
-    } else if (dir->name == "kafka") {
-        SrsJsonObject* sobj = SrsJsonAny::object();
-        for (int j = 0; j < (int)dir->directives.size(); j++) {
-            SrsConfDirective* sdir = dir->directives.at(j);
-            if (sdir->name == "enabled") {
-                sobj->set(sdir->name, sdir->dumps_arg0_to_boolean());
-            } else if (sdir->name == "brokers") {
-                sobj->set(sdir->name, sdir->dumps_args());
-            } else if (sdir->name == "topic") {
-                sobj->set(sdir->name, sdir->dumps_arg0_to_str());
-            }
-        }
-        obj->set(dir->name, sobj);
     } else if (dir->name == "stream_caster") {
         SrsJsonObject* sobj = SrsJsonAny::object();
         for (int j = 0; j < (int)dir->directives.size(); j++) {
@@ -3511,7 +3497,7 @@ srs_error_t SrsConfig::check_normal_config()
         && n != "srs_log_tank" && n != "srs_log_level" && n != "srs_log_file"
         && n != "max_connections" && n != "daemon" && n != "heartbeat"
         && n != "http_api" && n != "stats" && n != "vhost" && n != "pithy_print_ms"
-        && n != "http_server" && n != "stream_caster" && n != "kafka"
+        && n != "http_server" && n != "stream_caster"
         && n != "utc_time" && n != "work_dir" && n != "asprocess"
         ) {
         return srs_error_new(ERROR_SYSTEM_CONFIG_INVALID, "illegal directive %s", n.c_str());
@ -3545,15 +3531,6 @@ srs_error_t SrsConfig::check_normal_config()
|
|||
}
|
||||
}
|
||||
}
|
||||
if (true) {
|
||||
SrsConfDirective* conf = root->get("kafka");
|
||||
for (int i = 0; conf && i < (int)conf->directives.size(); i++) {
|
||||
string n = conf->at(i)->name;
|
||||
if (n != "enabled" && n != "brokers" && n != "topic") {
|
||||
return srs_error_new(ERROR_SYSTEM_CONFIG_INVALID, "illegal kafka.%s", n.c_str());
|
||||
}
|
||||
}
|
||||
}
|
||||
if (true) {
|
||||
SrsConfDirective* conf = get_heartbeart();
|
||||
for (int i = 0; conf && i < (int)conf->directives.size(); i++) {
|
||||
|
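The removed block reuses the whitelist pattern applied to every other top-level section in check_normal_config(): each child of the kafka directive is checked against the known names and anything else is rejected with ERROR_SYSTEM_CONFIG_INVALID. A generic sketch of that pattern, with a hypothetical helper name (the directive API and error code are taken from this diff):

    // Hypothetical helper; mirrors the per-section checks in check_normal_config().
    srs_error_t check_section_children(SrsConfDirective* conf, const std::vector<std::string>& allowed)
    {
        for (int i = 0; conf && i < (int)conf->directives.size(); i++) {
            std::string n = conf->at(i)->name;
            bool ok = false;
            for (int j = 0; j < (int)allowed.size(); j++) {
                ok = ok || (allowed.at(j) == n);
            }
            if (!ok) {
                return srs_error_new(ERROR_SYSTEM_CONFIG_INVALID, "illegal %s.%s", conf->name.c_str(), n.c_str());
            }
        }
        return srs_success;
    }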
@@ -4208,55 +4185,6 @@ int SrsConfig::get_stream_caster_rtp_port_max(SrsConfDirective* conf)
     return ::atoi(conf->arg0().c_str());
 }
-
-bool SrsConfig::get_kafka_enabled()
-{
-    static bool DEFAULT = false;
-    
-    SrsConfDirective* conf = root->get("kafka");
-    if (!conf) {
-        return DEFAULT;
-    }
-    
-    conf = conf->get("enabled");
-    if (!conf || conf->arg0().empty()) {
-        return DEFAULT;
-    }
-    
-    return SRS_CONF_PERFER_FALSE(conf->arg0());
-}
-
-SrsConfDirective* SrsConfig::get_kafka_brokers()
-{
-    SrsConfDirective* conf = root->get("kafka");
-    if (!conf) {
-        return NULL;
-    }
-    
-    conf = conf->get("brokers");
-    if (!conf || conf->args.empty()) {
-        return NULL;
-    }
-    
-    return conf;
-}
-
-string SrsConfig::get_kafka_topic()
-{
-    static string DEFAULT = "srs";
-    
-    SrsConfDirective* conf = root->get("kafka");
-    if (!conf) {
-        return DEFAULT;
-    }
-    
-    conf = conf->get("topic");
-    if (!conf || conf->arg0().empty()) {
-        return DEFAULT;
-    }
-    
-    return conf->arg0();
-}
 
 SrsConfDirective* SrsConfig::get_vhost(string vhost, bool try_default_vhost)
 {
     srs_assert(root);
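These three getters were the whole config surface of the feature. A minimal sketch of how the producer removed later in this commit consumed them (the wrapper function below is hypothetical; the getters, their defaults, and the global _srs_config are from this diff):

    // Hypothetical wrapper; mirrors SrsKafkaProducer::initialize() and request_metadata().
    srs_error_t sketch_read_kafka_config()
    {
        bool enabled = _srs_config->get_kafka_enabled();               // defaults to off (SRS_CONF_PERFER_FALSE).
        SrsConfDirective* brokers = _srs_config->get_kafka_brokers();  // NULL when no brokers are configured.
        std::string topic = _srs_config->get_kafka_topic();            // falls back to "srs".
        
        if (!enabled || !brokers) {
            return srs_success; // the feature is optional, silently skip.
        }
        
        srs_trace("kafka topic=%s, brokers=%d", topic.c_str(), (int)brokers->args.size());
        return srs_success;
    }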
@@ -455,14 +455,6 @@ public:
     virtual int get_stream_caster_rtp_port_min(SrsConfDirective* conf);
     // Get the max udp port for rtp of stream caster rtsp.
     virtual int get_stream_caster_rtp_port_max(SrsConfDirective* conf);
-// kafka section.
-public:
-    // Whether the kafka enabled.
-    virtual bool get_kafka_enabled();
-    // Get the broker list, each is format in <ip:port>.
-    virtual SrsConfDirective* get_kafka_brokers();
-    // Get the kafka topic to use for srs.
-    virtual std::string get_kafka_topic();
 // vhost specified section
 public:
     // Get the vhost directive by vhost name.
@@ -1,659 +0,0 @@
/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013-2019 Winlin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <srs_app_kafka.hpp>

#include <vector>
using namespace std;

#include <srs_kernel_error.hpp>
#include <srs_kernel_log.hpp>
#include <srs_app_config.hpp>
#include <srs_app_async_call.hpp>
#include <srs_app_utility.hpp>
#include <srs_kernel_utility.hpp>
#include <srs_protocol_utility.hpp>
#include <srs_kernel_balance.hpp>
#include <srs_kafka_stack.hpp>
#include <srs_core_autofree.hpp>
#include <srs_protocol_json.hpp>

#ifdef SRS_AUTO_KAFKA

#define SRS_KAFKA_PRODUCER_TIMEOUT (30 * SRS_UTIME_MILLISECONDS)
#define SRS_KAFKA_PRODUCER_AGGREGATE_SIZE 1

std::string srs_kafka_metadata_summary(SrsKafkaTopicMetadataResponse* metadata)
{
    vector<string> bs;
    for (int i = 0; i < metadata->brokers.size(); i++) {
        SrsKafkaBroker* broker = metadata->brokers.at(i);
        
        string hostport = srs_int2str(broker->node_id) + "/" + broker->host.to_str();
        if (broker->port > 0) {
            hostport += ":" + srs_int2str(broker->port);
        }
        
        bs.push_back(hostport);
    }
    
    vector<string> ps;
    for (int i = 0; i < metadata->metadatas.size(); i++) {
        SrsKafkaTopicMetadata* topic = metadata->metadatas.at(i);
        
        for (int j = 0; j < topic->metadatas.size(); j++) {
            string desc = "topic=" + topic->name.to_str();
            
            SrsKafkaPartitionMetadata* partition = topic->metadatas.at(j);
            
            desc += "?partition=" + srs_int2str(partition->partition_id);
            desc += "&leader=" + srs_int2str(partition->leader);
            
            vector<string> replicas = srs_kafka_array2vector(&partition->replicas);
            desc += "&replicas=" + srs_join_vector_string(replicas, ",");
            
            ps.push_back(desc);
        }
    }
    
    std::stringstream ss;
    ss << "brokers=" << srs_join_vector_string(bs, ",");
    ss << ", " << srs_join_vector_string(ps, ", ");
    
    return ss.str();
}

std::string srs_kafka_summary_partitions(const vector<SrsKafkaPartition*>& partitions)
{
    vector<string> ret;
    
    vector<SrsKafkaPartition*>::const_iterator it;
    for (it = partitions.begin(); it != partitions.end(); ++it) {
        SrsKafkaPartition* partition = *it;
        
        string desc = "tcp://";
        desc += partition->host + ":" + srs_int2str(partition->port);
        desc += "?broker=" + srs_int2str(partition->broker);
        desc += "&partition=" + srs_int2str(partition->id);
        ret.push_back(desc);
    }
    
    return srs_join_vector_string(ret, ", ");
}

void srs_kafka_metadata2connector(string topic_name, SrsKafkaTopicMetadataResponse* metadata, vector<SrsKafkaPartition*>& partitions)
{
    for (int i = 0; i < metadata->metadatas.size(); i++) {
        SrsKafkaTopicMetadata* topic = metadata->metadatas.at(i);
        
        for (int j = 0; j < topic->metadatas.size(); j++) {
            SrsKafkaPartitionMetadata* partition = topic->metadatas.at(j);
            
            SrsKafkaPartition* p = new SrsKafkaPartition();
            
            p->topic = topic_name;
            p->id = partition->partition_id;
            p->broker = partition->leader;
            
            for (int i = 0; i < metadata->brokers.size(); i++) {
                SrsKafkaBroker* broker = metadata->brokers.at(i);
                if (broker->node_id == p->broker) {
                    p->host = broker->host.to_str();
                    p->port = broker->port;
                    break;
                }
            }
            
            partitions.push_back(p);
        }
    }
}

SrsKafkaPartition::SrsKafkaPartition()
{
    id = broker = 0;
    port = SRS_CONSTS_KAFKA_DEFAULT_PORT;
    
    transport = NULL;
    kafka = NULL;
}

SrsKafkaPartition::~SrsKafkaPartition()
{
    disconnect();
}

string SrsKafkaPartition::hostport()
{
    if (ep.empty()) {
        ep = host + ":" + srs_int2str(port);
    }
    
    return ep;
}

srs_error_t SrsKafkaPartition::connect()
{
    srs_error_t err = srs_success;
    
    if (transport) {
        return err;
    }
    transport = new SrsTcpClient(host, port, SRS_KAFKA_PRODUCER_TIMEOUT);
    kafka = new SrsKafkaClient(transport);
    
    if ((err = transport->connect()) != srs_success) {
        disconnect();
        return srs_error_wrap(err, "connect to %s partition=%d failed", hostport().c_str(), id);
    }
    
    srs_trace("connect at %s, partition=%d, broker=%d", hostport().c_str(), id, broker);
    
    return err;
}

srs_error_t SrsKafkaPartition::flush(SrsKafkaPartitionCache* pc)
{
    return kafka->write_messages(topic, id, *pc);
}

void SrsKafkaPartition::disconnect()
{
    srs_freep(kafka);
    srs_freep(transport);
}

SrsKafkaMessage::SrsKafkaMessage(SrsKafkaProducer* p, int k, SrsJsonObject* j)
{
    producer = p;
    key = k;
    obj = j;
}

SrsKafkaMessage::~SrsKafkaMessage()
{
    srs_freep(obj);
}

srs_error_t SrsKafkaMessage::call()
{
    srs_error_t err = producer->send(key, obj);
    
    // the obj is manged by producer now.
    obj = NULL;
    
    return srs_error_wrap(err, "kafka send");
}

string SrsKafkaMessage::to_string()
{
    return "kafka";
}

SrsKafkaCache::SrsKafkaCache()
{
    count = 0;
    nb_partitions = 0;
}

SrsKafkaCache::~SrsKafkaCache()
{
    map<int32_t, SrsKafkaPartitionCache*>::iterator it;
    for (it = cache.begin(); it != cache.end(); ++it) {
        SrsKafkaPartitionCache* pc = it->second;
        
        for (vector<SrsJsonObject*>::iterator it2 = pc->begin(); it2 != pc->end(); ++it2) {
            SrsJsonObject* obj = *it2;
            srs_freep(obj);
        }
        pc->clear();
        
        srs_freep(pc);
    }
    cache.clear();
}

void SrsKafkaCache::append(int key, SrsJsonObject* obj)
{
    count++;
    
    int partition = 0;
    if (nb_partitions > 0) {
        partition = key % nb_partitions;
    }
    
    SrsKafkaPartitionCache* pc = NULL;
    map<int32_t, SrsKafkaPartitionCache*>::iterator it = cache.find(partition);
    if (it == cache.end()) {
        pc = new SrsKafkaPartitionCache();
        cache[partition] = pc;
    } else {
        pc = it->second;
    }
    
    pc->push_back(obj);
}
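append() above is where a client key gets pinned to a kafka partition: key % nb_partitions, with everything funneled into partition 0 until the first metadata response sets nb_partitions. A standalone sketch of that mapping, in plain C++ without the SRS types:

    // Same mapping as SrsKafkaCache::append(): a stable client key always lands
    // on the same partition once the partition count is known.
    int sketch_map_key_to_partition(int key, int nb_partitions)
    {
        if (nb_partitions <= 0) {
            return 0; // metadata not fetched yet; single bucket.
        }
        return key % nb_partitions;
    }
    
    // e.g. sketch_map_key_to_partition(1007, 4) == 3; the same key keeps hitting
    // partition 3 until a metadata refresh changes nb_partitions.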
int SrsKafkaCache::size()
{
    return count;
}

bool SrsKafkaCache::fetch(int* pkey, SrsKafkaPartitionCache** ppc)
{
    map<int32_t, SrsKafkaPartitionCache*>::iterator it;
    for (it = cache.begin(); it != cache.end(); ++it) {
        int32_t key = it->first;
        SrsKafkaPartitionCache* pc = it->second;
        
        if (!pc->empty()) {
            *pkey = (int)key;
            *ppc = pc;
            return true;
        }
    }
    
    return false;
}

srs_error_t SrsKafkaCache::flush(SrsKafkaPartition* partition, int key, SrsKafkaPartitionCache* pc)
{
    srs_error_t err = srs_success;
    
    // ensure the key exists.
    srs_assert (cache.find(key) != cache.end());
    
    // the cache is vector, which is continous store.
    // we remember the messages we have written and clear it when completed.
    int nb_msgs = (int)pc->size();
    if (pc->empty()) {
        return err;
    }
    
    // connect transport.
    if ((err = partition->connect()) != srs_success) {
        return srs_error_wrap(err, "connect partition");
    }
    
    // write the json objects.
    if ((err = partition->flush(pc)) != srs_success) {
        return srs_error_wrap(err, "flush partition");
    }
    
    // free all wrote messages.
    for (vector<SrsJsonObject*>::iterator it = pc->begin(); it != pc->end(); ++it) {
        SrsJsonObject* obj = *it;
        srs_freep(obj);
    }
    
    // remove the messages from cache.
    if ((int)pc->size() == nb_msgs) {
        pc->clear();
    } else {
        pc->erase(pc->begin(), pc->begin() + nb_msgs);
    }
    
    return err;
}

ISrsKafkaCluster::ISrsKafkaCluster()
{
}

ISrsKafkaCluster::~ISrsKafkaCluster()
{
}

// @global kafka event producer, user must use srs_initialize_kafka to initialize it.
ISrsKafkaCluster* _srs_kafka = NULL;

srs_error_t srs_initialize_kafka()
{
    srs_error_t err = srs_success;
    
    SrsKafkaProducer* kafka = new SrsKafkaProducer();
    _srs_kafka = kafka;
    
    if ((err = kafka->initialize()) != srs_success) {
        return srs_error_wrap(err, "initialize kafka producer");
    }
    
    if ((err = kafka->start()) != srs_success) {
        return srs_error_wrap(err, "start kafka producer");
    }
    
    return err;
}

void srs_dispose_kafka()
{
    SrsKafkaProducer* kafka = dynamic_cast<SrsKafkaProducer*>(_srs_kafka);
    if (!kafka) {
        return;
    }
    
    kafka->stop();
    
    srs_freep(kafka);
    _srs_kafka = NULL;
}
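srs_initialize_kafka() and srs_dispose_kafka() were the only entry points the rest of SRS needed; the server hunks later in this diff call them at startup and teardown. A condensed usage sketch (the surrounding wrapper function is hypothetical; the calls are from this diff):

    // Hypothetical startup/shutdown wrapper; SrsServer::initialize_st() and
    // SrsServer::dispose() did exactly this under #ifdef SRS_AUTO_KAFKA.
    srs_error_t sketch_kafka_lifecycle()
    {
        srs_error_t err = srs_success;
        
        // Creates the global SrsKafkaProducer behind _srs_kafka and starts its coroutine.
        if ((err = srs_initialize_kafka()) != srs_success) {
            return srs_error_wrap(err, "initialize kafka");
        }
        
        // ... serve; connections report events via _srs_kafka->on_client()/on_close() ...
        
        // Stops the coroutine and worker, frees the producer, resets _srs_kafka to NULL.
        srs_dispose_kafka();
        return err;
    }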
SrsKafkaProducer::SrsKafkaProducer()
{
    metadata_ok = false;
    metadata_expired = srs_cond_new();
    
    lock = srs_mutex_new();
    trd = new SrsDummyCoroutine();
    worker = new SrsAsyncCallWorker();
    cache = new SrsKafkaCache();
    
    lb = new SrsLbRoundRobin();
}

SrsKafkaProducer::~SrsKafkaProducer()
{
    clear_metadata();
    
    srs_freep(lb);
    
    srs_freep(worker);
    srs_freep(trd);
    srs_freep(cache);
    
    srs_mutex_destroy(lock);
    srs_cond_destroy(metadata_expired);
}

srs_error_t SrsKafkaProducer::initialize()
{
    enabled = _srs_config->get_kafka_enabled();
    srs_info("initialize kafka ok, enabled=%d.", enabled);
    return srs_success;
}

srs_error_t SrsKafkaProducer::start()
{
    srs_error_t err = srs_success;
    
    if (!enabled) {
        return err;
    }
    
    if ((err = worker->start()) != srs_success) {
        return srs_error_wrap(err, "async worker");
    }
    
    srs_freep(trd);
    trd = new SrsSTCoroutine("kafka", this, _srs_context->get_id());
    if ((err = trd->start()) != srs_success) {
        return srs_error_wrap(err, "coroutine");
    }
    
    refresh_metadata();
    
    return err;
}

void SrsKafkaProducer::stop()
{
    if (!enabled) {
        return;
    }
    
    trd->stop();
    worker->stop();
}

srs_error_t SrsKafkaProducer::send(int key, SrsJsonObject* obj)
{
    srs_error_t err = srs_success;
    
    // cache the json object.
    cache->append(key, obj);
    
    // too few messages, ignore.
    if (cache->size() < SRS_KAFKA_PRODUCER_AGGREGATE_SIZE) {
        return err;
    }
    
    // too many messages, warn user.
    if (cache->size() > SRS_KAFKA_PRODUCER_AGGREGATE_SIZE * 10) {
        srs_warn("kafka cache too many messages: %d", cache->size());
    }
    
    // sync with backgound metadata worker.
    SrsLocker(lock);
    
    // flush message when metadata is ok.
    if (metadata_ok) {
        err = flush();
    }
    
    return err;
}

srs_error_t SrsKafkaProducer::on_client(int key, SrsListenerType type, string ip)
{
    srs_error_t err = srs_success;
    
    if (!enabled) {
        return err;
    }
    
    SrsJsonObject* obj = SrsJsonAny::object();
    
    obj->set("msg", SrsJsonAny::str("accept"));
    obj->set("type", SrsJsonAny::integer(type));
    obj->set("ip", SrsJsonAny::str(ip.c_str()));
    
    return worker->execute(new SrsKafkaMessage(this, key, obj));
}

srs_error_t SrsKafkaProducer::on_close(int key)
{
    srs_error_t err = srs_success;
    
    if (!enabled) {
        return err;
    }
    
    SrsJsonObject* obj = SrsJsonAny::object();
    
    obj->set("msg", SrsJsonAny::str("close"));
    
    return worker->execute(new SrsKafkaMessage(this, key, obj));
}

#define SRS_KAKFA_CIMS (3 * SRS_UTIME_SECONDS)

srs_error_t SrsKafkaProducer::cycle()
{
    srs_error_t err = srs_success;
    
    // wait for the metadata expired.
    // when metadata is ok, wait for it expired.
    if (metadata_ok) {
        srs_cond_wait(metadata_expired);
    }
    
    // request to lock to acquire the socket.
    SrsLocker(lock);
    
    while (true) {
        if ((err = do_cycle()) != srs_success) {
            srs_warn("KafkaProducer: Ignore error, %s", srs_error_desc(err).c_str());
            srs_freep(err);
        }
        
        if ((err = trd->pull()) != srs_success) {
            return srs_error_wrap(err, "kafka cycle");
        }
        
        srs_usleep(SRS_KAKFA_CIMS);
    }
    
    return err;
}

void SrsKafkaProducer::clear_metadata()
{
    vector<SrsKafkaPartition*>::iterator it;
    
    for (it = partitions.begin(); it != partitions.end(); ++it) {
        SrsKafkaPartition* partition = *it;
        srs_freep(partition);
    }
    
    partitions.clear();
}

srs_error_t SrsKafkaProducer::do_cycle()
{
    srs_error_t err = srs_success;
    
    // ignore when disabled.
    if (!enabled) {
        return err;
    }
    
    // when kafka enabled, request metadata when startup.
    if ((err = request_metadata()) != srs_success) {
        return srs_error_wrap(err, "request metadata");
    }
    
    return err;
}

srs_error_t SrsKafkaProducer::request_metadata()
{
    srs_error_t err = srs_success;
    
    // ignore when disabled.
    if (!enabled) {
        return err;
    }
    
    // select one broker to connect to.
    SrsConfDirective* brokers = _srs_config->get_kafka_brokers();
    if (!brokers) {
        srs_warn("ignore for empty brokers.");
        return err;
    }
    
    std::string server;
    int port = SRS_CONSTS_KAFKA_DEFAULT_PORT;
    if (true) {
        srs_assert(!brokers->args.empty());
        std::string broker = lb->select(brokers->args);
        srs_parse_endpoint(broker, server, port);
    }
    
    std::string topic = _srs_config->get_kafka_topic();
    if (true) {
        std::string senabled = srs_bool2switch(enabled);
        std::string sbrokers = srs_join_vector_string(brokers->args, ",");
        srs_trace("kafka request enabled:%s, brokers:%s, current:[%d]%s:%d, topic:%s",
            senabled.c_str(), sbrokers.c_str(), lb->current(), server.c_str(), port, topic.c_str());
    }
    
    SrsTcpClient* transport = new SrsTcpClient(server, port, SRS_CONSTS_KAFKA_TIMEOUT);
    SrsAutoFree(SrsTcpClient, transport);
    
    SrsKafkaClient* kafka = new SrsKafkaClient(transport);
    SrsAutoFree(SrsKafkaClient, kafka);
    
    // reconnect to kafka server.
    if ((err = transport->connect()) != srs_success) {
        return srs_error_wrap(err, "connect %s:%d failed", server.c_str(), port);
    }
    
    // do fetch medata from broker.
    SrsKafkaTopicMetadataResponse* metadata = NULL;
    if ((err = kafka->fetch_metadata(topic, &metadata)) != srs_success) {
        return srs_error_wrap(err, "fetch metadata");
    }
    SrsAutoFree(SrsKafkaTopicMetadataResponse, metadata);
    
    // we may need to request multiple times.
    // for example, the first time to create a none-exists topic, then query metadata.
    if (!metadata->metadatas.empty()) {
        SrsKafkaTopicMetadata* topic = metadata->metadatas.at(0);
        if (topic->metadatas.empty()) {
            srs_warn("topic %s metadata empty, retry.", topic->name.to_str().c_str());
            return err;
        }
    }
    
    // show kafka metadata.
    string summary = srs_kafka_metadata_summary(metadata);
    srs_trace("kafka metadata: %s", summary.c_str());
    
    // generate the partition info.
    srs_kafka_metadata2connector(topic, metadata, partitions);
    srs_trace("kafka connector: %s", srs_kafka_summary_partitions(partitions).c_str());
    
    // update the total partition for cache.
    cache->nb_partitions = (int)partitions.size();
    
    metadata_ok = true;
    
    return err;
}

void SrsKafkaProducer::refresh_metadata()
{
    clear_metadata();
    
    metadata_ok = false;
    srs_cond_signal(metadata_expired);
    srs_trace("kafka async refresh metadata in background");
}

srs_error_t SrsKafkaProducer::flush()
{
    srs_error_t err = srs_success;
    
    // flush all available partition caches.
    while (true) {
        int key = -1;
        SrsKafkaPartitionCache* pc = NULL;
        
        // all flushed, or no kafka partition to write to.
        if (!cache->fetch(&key, &pc) || partitions.empty()) {
            break;
        }
        
        // flush specified partition.
        srs_assert(key >= 0 && pc);
        SrsKafkaPartition* partition = partitions.at(key % partitions.size());
        if ((err = cache->flush(partition, key, pc)) != srs_success) {
            return srs_error_wrap(err, "flush partition");
        }
    }
    
    return err;
}

#endif
@@ -1,190 +0,0 @@
/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2013-2019 Winlin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef SRS_APP_KAFKA_HPP
#define SRS_APP_KAFKA_HPP

#include <srs_core.hpp>

#include <map>
#include <vector>

class SrsLbRoundRobin;
class SrsAsyncCallWorker;
class SrsTcpClient;
class SrsKafkaClient;
class SrsJsonObject;
class SrsKafkaProducer;

#include <srs_app_thread.hpp>
#include <srs_app_server.hpp>
#include <srs_app_async_call.hpp>

#ifdef SRS_AUTO_KAFKA

// The partition messages cache.
typedef std::vector<SrsJsonObject*> SrsKafkaPartitionCache;

// The kafka partition info.
struct SrsKafkaPartition
{
private:
    std::string ep;
    // Not NULL when connected.
    SrsTcpClient* transport;
    SrsKafkaClient* kafka;
public:
    int id;
    std::string topic;
    // leader.
    int broker;
    std::string host;
    int port;
public:
    SrsKafkaPartition();
    virtual ~SrsKafkaPartition();
public:
    virtual std::string hostport();
    virtual srs_error_t connect();
    virtual srs_error_t flush(SrsKafkaPartitionCache* pc);
private:
    virtual void disconnect();
};

// The following is all types of kafka messages.
class SrsKafkaMessage : public ISrsAsyncCallTask
{
private:
    SrsKafkaProducer* producer;
    int key;
    SrsJsonObject* obj;
public:
    SrsKafkaMessage(SrsKafkaProducer* p, int k, SrsJsonObject* j);
    virtual ~SrsKafkaMessage();
// Interface ISrsAsyncCallTask
public:
    virtual srs_error_t call();
    virtual std::string to_string();
};

// A message cache for kafka.
class SrsKafkaCache
{
public:
    // The total partitions,
    // for the key to map to the parition by key%nb_partitions.
    int nb_partitions;
private:
    // Total messages for all partitions.
    int count;
    // The key is the partition id, value is the message set to write to this partition.
    // @remark, when refresh metadata, the partition will increase,
    // so maybe some message will dispatch to new partition.
    std::map<int32_t, SrsKafkaPartitionCache*> cache;
public:
    SrsKafkaCache();
    virtual ~SrsKafkaCache();
public:
    virtual void append(int key, SrsJsonObject* obj);
    virtual int size();
    // Fetch out a available partition cache.
    // @return true when got a key and pc; otherwise, false.
    virtual bool fetch(int* pkey, SrsKafkaPartitionCache** ppc);
    // Flush the specified partition cache.
    virtual srs_error_t flush(SrsKafkaPartition* partition, int key, SrsKafkaPartitionCache* pc);
};

// The kafka cluster interface.
class ISrsKafkaCluster
{
public:
    ISrsKafkaCluster();
    virtual ~ISrsKafkaCluster();
public:
    // When got any client connect to SRS, notify kafka.
    // @param key the partition map key, the client id or hash(ip).
    // @param type the type of client.
    // @param ip the peer ip of client.
    virtual srs_error_t on_client(int key, SrsListenerType type, std::string ip) = 0;
    // When client close or disconnect for error.
    // @param key the partition map key, the client id or hash(ip).
    virtual srs_error_t on_close(int key) = 0;
};
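ISrsKafkaCluster is the only thing connection code sees (via the global _srs_kafka declared below), so the producer could in principle be swapped for a stub without touching call sites; deleting the call sites outright, as this commit does, is the other option. A hedged sketch of such a stub (the class is hypothetical; the interface is from this header):

    // Hypothetical no-op implementation of the interface above.
    class SrsNullKafkaCluster : public ISrsKafkaCluster
    {
    public:
        virtual srs_error_t on_client(int key, SrsListenerType type, std::string ip) {
            (void)key; (void)type; (void)ip;
            return srs_success; // swallow the event.
        }
        virtual srs_error_t on_close(int key) {
            (void)key;
            return srs_success;
        }
    };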
// @global kafka event producer.
extern ISrsKafkaCluster* _srs_kafka;
// kafka initialize and disposer for global object.
extern srs_error_t srs_initialize_kafka();
extern void srs_dispose_kafka();

// The kafka producer used to save log to kafka cluster.
class SrsKafkaProducer : virtual public ISrsCoroutineHandler, virtual public ISrsKafkaCluster
{
private:
    // TODO: FIXME: support reload.
    bool enabled;
    srs_mutex_t lock;
    SrsCoroutine* trd;
private:
    bool metadata_ok;
    srs_cond_t metadata_expired;
public:
    std::vector<SrsKafkaPartition*> partitions;
    SrsKafkaCache* cache;
private:
    SrsLbRoundRobin* lb;
    SrsAsyncCallWorker* worker;
public:
    SrsKafkaProducer();
    virtual ~SrsKafkaProducer();
public:
    virtual srs_error_t initialize();
    virtual srs_error_t start();
    virtual void stop();
// internal: for worker to call task to send object.
public:
    // Send json object to kafka cluster.
    // The producer will aggregate message and send in kafka message set.
    // @param key the key to map to the partition, user can use cid or hash.
    // @param obj the json object; user must never free it again.
    virtual srs_error_t send(int key, SrsJsonObject* obj);
// Interface ISrsKafkaCluster
public:
    virtual srs_error_t on_client(int key, SrsListenerType type, std::string ip);
    virtual srs_error_t on_close(int key);
// Interface ISrsReusableThreadHandler
public:
    virtual srs_error_t cycle();
private:
    virtual void clear_metadata();
    virtual srs_error_t do_cycle();
    virtual srs_error_t request_metadata();
    // Set the metadata to invalid and refresh it.
    virtual void refresh_metadata();
    virtual srs_error_t flush();
};

#endif

#endif
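For orientation, SrsKafkaProducer implements ISrsCoroutineHandler, so its cycle() is driven by the SrsSTCoroutine it creates in start(). A minimal sketch of that contract with a hypothetical handler (the types and calls are taken from this diff; real handlers, like the producer, also poll trd->pull() inside the loop so stop() can interrupt them):

    // Hypothetical handler illustrating the ISrsCoroutineHandler contract.
    class SrsExampleHandler : public ISrsCoroutineHandler
    {
    public:
        virtual srs_error_t cycle() {
            while (true) {
                // periodic work goes here.
                srs_usleep(3 * SRS_UTIME_SECONDS);
            }
            return srs_success;
        }
    };
    
    // Usage sketch:
    //   SrsCoroutine* trd = new SrsSTCoroutine("example", new SrsExampleHandler(), _srs_context->get_id());
    //   srs_error_t err = trd->start();  // schedules cycle() on an ST coroutine.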
@@ -55,7 +55,6 @@ using namespace std;
 #include <srs_app_statistic.hpp>
 #include <srs_protocol_utility.hpp>
 #include <srs_protocol_json.hpp>
-#include <srs_app_kafka.hpp>
 
 // the timeout in srs_utime_t to wait encoder to republish
 // if timeout, close the connection.
@@ -154,13 +153,6 @@ srs_error_t SrsRtmpConn::do_cycle()
     
     srs_trace("RTMP client ip=%s, fd=%d", ip.c_str(), srs_netfd_fileno(stfd));
     
-    // notify kafka cluster.
-#ifdef SRS_AUTO_KAFKA
-    if ((err = _srs_kafka->on_client(srs_id(), SrsListenerRtmpStream, ip)) != srs_success) {
-        return srs_error_wrap(err, "kafka on client");
-    }
-#endif
-    
     rtmp->set_recv_timeout(SRS_CONSTS_RTMP_TIMEOUT);
     rtmp->set_send_timeout(SRS_CONSTS_RTMP_TIMEOUT);
@@ -1194,12 +1186,6 @@ srs_error_t SrsRtmpConn::on_disconnect()
     
     http_hooks_on_close();
     
-#ifdef SRS_AUTO_KAFKA
-    if ((err = _srs_kafka->on_close(srs_id())) != srs_success) {
-        return srs_error_wrap(err, "kafka on close");
-    }
-#endif
-    
     // TODO: FIXME: Implements it.
     
     return err;
@@ -54,9 +54,6 @@ class SrsSecurity;
 class ISrsWakable;
 class SrsCommonMessage;
 class SrsPacket;
-#ifdef SRS_AUTO_KAFKA
-class ISrsKafkaCluster;
-#endif
 
 // The simple rtmp client for SRS.
 class SrsSimpleRtmpClient : public SrsBasicRtmpClient
@@ -49,7 +49,6 @@ using namespace std;
 #include <srs_app_caster_flv.hpp>
 #include <srs_core_mem_watch.hpp>
 #include <srs_kernel_consts.hpp>
-#include <srs_app_kafka.hpp>
 #include <srs_app_thread.hpp>
 #include <srs_app_coworkers.hpp>
 
@@ -523,10 +522,6 @@ void SrsServer::dispose()
     
     // @remark don't dispose ingesters, for too slow.
     
-#ifdef SRS_AUTO_KAFKA
-    srs_dispose_kafka();
-#endif
-    
     // dispose the source for hls and dvr.
     SrsSource::dispose_all();
     
@@ -590,13 +585,6 @@ srs_error_t SrsServer::initialize_st()
     // set current log id.
     _srs_context->generate_id();
     
-    // initialize the conponents that depends on st.
-#ifdef SRS_AUTO_KAFKA
-    if ((err = srs_initialize_kafka()) != srs_success) {
-        return srs_error_wrap(err, "initialize kafka");
-    }
-#endif
-    
     // check asprocess.
     bool asprocess = _srs_config->get_asprocess();
     if (asprocess && ppid == 1) {
@@ -50,9 +50,6 @@ class ISrsUdpHandler;
 class SrsUdpListener;
 class SrsTcpListener;
 class SrsAppCasterFlv;
-#ifdef SRS_AUTO_KAFKA
-class SrsKafkaProducer;
-#endif
 class SrsCoroutineManager;
 
 // The listener type for server to identify the connection,