From cda61fe2d97bb9b415a3108040b06b6c8c6c0bfe Mon Sep 17 00:00:00 2001
From: Adam Ierymenko
Date: Wed, 26 Nov 2014 13:14:18 -0800
Subject: [PATCH] docs and cleanup

---
 README.md                  |  2 +-
 node/AtomicCounter.hpp     |  1 +
 node/Multicaster.cpp       | 65 +++++++++-----------------------------
 node/OutboundMulticast.hpp | 10 +++---
 4 files changed, 21 insertions(+), 57 deletions(-)

diff --git a/README.md b/README.md
index ee258ae0..d83f777b 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-ZeroTier One - Planetary Scale Software Defined Networking
+ZeroTier One - Network Virtualization Everywhere
 ======
 
 ZeroTier One is an ethernet virtualization engine. It creates virtual switched LANs of almost unlimited size that span physical network boundaries. To the operating system these behave just like ordinary Ethernet ports. Everything just works, even as your computer moves around or your physical Internet link changes.
diff --git a/node/AtomicCounter.hpp b/node/AtomicCounter.hpp
index 80208eda..0dbcb1cd 100644
--- a/node/AtomicCounter.hpp
+++ b/node/AtomicCounter.hpp
@@ -111,6 +111,7 @@ private:
 #else
 	int _v;
 #ifndef __GNUC__
+#warning Neither __WINDOWS__ nor __GNUC__ so AtomicCounter using Mutex
 	Mutex _l;
 #endif
 #endif
diff --git a/node/Multicaster.cpp b/node/Multicaster.cpp
index 7b5c5e37..9955ce88 100644
--- a/node/Multicaster.cpp
+++ b/node/Multicaster.cpp
@@ -165,8 +165,8 @@ void Multicaster::send(
 	MulticastGroupStatus &gs = _groups[std::pair<uint64_t,MulticastGroup>(nwid,mg)];
 
 	if (!gs.members.empty()) {
-		// Use a stack-allocated buffer unless this multicast group is ridiculously huge
-		if (gs.members.size() > 8194)
+		// Allocate a memory buffer if group is monstrous
+		if (gs.members.size() > (sizeof(idxbuf) / sizeof(unsigned long)))
 			indexes = new unsigned long[gs.members.size()];
 
 		// Generate a random permutation of member indexes
@@ -181,9 +181,7 @@
 		}
 
 		if (gs.members.size() >= limit) {
-			// If we already have enough members, just send and we're done. We can
-			// skip the TX queue and skip the overhead of maintaining a send log by
-			// using sendOnly().
+			// Skip queue if we already have enough members to complete the send operation
 			OutboundMulticast out;
 
 			out.init(
@@ -202,33 +200,16 @@
 			unsigned int count = 0;
 
 			for(std::vector<Address>::const_iterator ast(alwaysSendTo.begin());ast!=alwaysSendTo.end();++ast) {
-#ifdef ZT_SUPPORT_LEGACY_MULTICAST
-				{
-					SharedPtr<Peer> p(RR->topology->getPeer(*ast));
-					if ((p)&&(p->remoteVersionKnown())&&(p->remoteVersionMajor() < 1))
-						continue;
-				}
-#endif
-
 				out.sendOnly(RR,*ast);
 				if (++count >= limit)
 					break;
 			}
 
 			unsigned long idx = 0;
-			while (count < limit) { // limit <= gs.members.size() so idx will never overflow here
-				const MulticastGroupMember &m = gs.members[indexes[idx++]];
-
-#ifdef ZT_SUPPORT_LEGACY_MULTICAST
-				{
-					SharedPtr<Peer> p(RR->topology->getPeer(m.address));
-					if ((p)&&(p->remoteVersionKnown())&&(p->remoteVersionMajor() < 1))
-						continue;
-				}
-#endif
-
-				if (std::find(alwaysSendTo.begin(),alwaysSendTo.end(),m.address) == alwaysSendTo.end()) {
-					out.sendOnly(RR,m.address);
+			while ((count < limit)&&(idx < gs.members.size())) {
+				Address ma(gs.members[indexes[idx++]].address);
+				if (std::find(alwaysSendTo.begin(),alwaysSendTo.end(),ma) == alwaysSendTo.end()) {
+					out.sendOnly(RR,ma);
 					++count;
 				}
 			}
@@ -239,18 +220,18 @@
 			gs.lastExplicitGather = now;
 			SharedPtr<Peer> sn(RR->topology->getBestSupernode());
 			if (sn) {
-				TRACE(">>MC GATHER up to %u in %.16llx/%s",gatherLimit,nwid,mg.toString().c_str());
+				TRACE(">>MC upstream GATHER up to %u for group %.16llx/%s",gatherLimit,nwid,mg.toString().c_str());
 
 				Packet outp(sn->address(),RR->identity.address(),Packet::VERB_MULTICAST_GATHER);
 				outp.append(nwid);
 				outp.append((uint8_t)0);
 				mg.mac().appendTo(outp);
 				outp.append((uint32_t)mg.adi());
-				outp.append((uint32_t)gatherLimit); // +1 just means we'll have an extra in the queue if available
+				outp.append((uint32_t)gatherLimit);
 				outp.armor(sn->key(),true);
 				sn->send(RR,outp.data(),outp.size(),now);
 			}
-			gatherLimit = 1; // we still gather a bit from peers as well
+			gatherLimit = 0;
 		}
 
 		gs.txQueue.push_back(OutboundMulticast());
@@ -272,14 +253,6 @@
 			unsigned int count = 0;
 
 			for(std::vector<Address>::const_iterator ast(alwaysSendTo.begin());ast!=alwaysSendTo.end();++ast) {
-#ifdef ZT_SUPPORT_LEGACY_MULTICAST
-				{
-					SharedPtr<Peer> p(RR->topology->getPeer(*ast));
-					if ((p)&&(p->remoteVersionKnown())&&(p->remoteVersionMajor() < 1))
-						continue;
-				}
-#endif
-
 				out.sendAndLog(RR,*ast);
 				if (++count >= limit)
 					break;
@@ -287,23 +260,15 @@
 			}
 
 			unsigned long idx = 0;
 			while ((count < limit)&&(idx < gs.members.size())) {
-				const MulticastGroupMember &m = gs.members[indexes[idx++]];
-
-#ifdef ZT_SUPPORT_LEGACY_MULTICAST
-				{
-					SharedPtr<Peer> p(RR->topology->getPeer(m.address));
-					if ((p)&&(p->remoteVersionKnown())&&(p->remoteVersionMajor() < 1))
-						continue;
-				}
-#endif
-
-				if (std::find(alwaysSendTo.begin(),alwaysSendTo.end(),m.address) == alwaysSendTo.end()) {
-					out.sendAndLog(RR,m.address);
+				Address ma(gs.members[indexes[idx++]].address);
+				if (std::find(alwaysSendTo.begin(),alwaysSendTo.end(),ma) == alwaysSendTo.end()) {
+					out.sendAndLog(RR,ma);
 					++count;
 				}
 			}
 		}
 
+	// Free allocated memory buffer if any
 	if (indexes != idxbuf)
 		delete [] indexes;
@@ -346,7 +311,7 @@
 			sn->send(RR,outp.data(),outp.size(),now);
 		}
 	}
-#endif
+#endif // ZT_SUPPORT_LEGACY_MULTICAST
 }
 
 void Multicaster::clean(uint64_t now)
diff --git a/node/OutboundMulticast.hpp b/node/OutboundMulticast.hpp
index b3ca36a5..77e99377 100644
--- a/node/OutboundMulticast.hpp
+++ b/node/OutboundMulticast.hpp
@@ -133,12 +133,10 @@ public:
 	 */
 	inline bool sendIfNew(const RuntimeEnvironment *RR,const Address &toAddr)
 	{
-		for(std::vector<Address>::const_iterator a(_alreadySentTo.begin());a!=_alreadySentTo.end();++a) {
-			if (*a == toAddr)
-				return false;
-		}
-		sendAndLog(RR,toAddr);
-		return true;
+		if (std::find(_alreadySentTo.begin(),_alreadySentTo.end(),toAddr) == _alreadySentTo.end()) {
+			sendAndLog(RR,toAddr);
+			return true;
+		} else return false;
 	}
 
 private: