13
0

Make InternalReturns collect their sends' data on the return's ::run(), rather than sends merging data with the InternalReturn on their ::run(). This makes internal send/return thread-safe so that N routes can send to 1 whilst they are being run in parallel process threads.

git-svn-id: svn://localhost/ardour2/branches/3.0@8904 d708f5d6-7413-0410-9779-e7cbd77b26cf
This commit is contained in:
Carl Hetherington 2011-02-20 00:55:52 +00:00
parent 13232d03f3
commit 8f0750cc7e
6 changed files with 44 additions and 53 deletions

View File

@@ -27,12 +27,14 @@
namespace ARDOUR { namespace ARDOUR {
class InternalSend;
class InternalReturn : public Return class InternalReturn : public Return
{ {
public: public:
InternalReturn (Session&); InternalReturn (Session&);
XMLNode& state(bool full); XMLNode& state(bool full);
XMLNode& get_state(void); XMLNode& get_state(void);
int set_state(const XMLNode&, int version); int set_state(const XMLNode&, int version);
@@ -41,14 +43,16 @@ XMLNode& state(bool full);
bool can_support_io_configuration (const ChanCount& in, ChanCount& out) const; bool can_support_io_configuration (const ChanCount& in, ChanCount& out) const;
int set_block_size (pframes_t); int set_block_size (pframes_t);
BufferSet* get_buffers(); void add_send (InternalSend *);
void release_buffers(); void remove_send (InternalSend *);
static PBD::Signal1<void, pframes_t> CycleStart; static PBD::Signal1<void, pframes_t> CycleStart;
private: private:
BufferSet buffers; BufferSet buffers;
gint user_count; /* atomic */ /** sends that we are receiving data from */
std::list<InternalSend*> _sends;
void allocate_buffers (pframes_t); void allocate_buffers (pframes_t);
void cycle_start (pframes_t); void cycle_start (pframes_t);
}; };

View File

@@ -48,9 +48,12 @@ class InternalSend : public Send
boost::shared_ptr<Route> target_route() const { return _send_to; } boost::shared_ptr<Route> target_route() const { return _send_to; }
const PBD::ID& target_id() const { return _send_to_id; } const PBD::ID& target_id() const { return _send_to_id; }
BufferSet const & get_buffers () const {
return mixbufs;
}
private: private:
BufferSet mixbufs; BufferSet mixbufs;
BufferSet* target;
boost::shared_ptr<Route> _send_to; boost::shared_ptr<Route> _send_to;
PBD::ID _send_to_id; PBD::ID _send_to_id;
PBD::ScopedConnection connect_c; PBD::ScopedConnection connect_c;

View File

@@ -63,6 +63,7 @@ class InternalReturn;
class MonitorProcessor; class MonitorProcessor;
class Pannable; class Pannable;
class CapturingProcessor; class CapturingProcessor;
class InternalSend;
class Route : public SessionObject, public Automatable, public RouteGroupMember, public GraphNode class Route : public SessionObject, public Automatable, public RouteGroupMember, public GraphNode
{ {
@@ -218,8 +219,8 @@ class Route : public SessionObject, public Automatable, public RouteGroupMember,
boost::shared_ptr<MonitorProcessor> monitor_control() const { return _monitor_control; } boost::shared_ptr<MonitorProcessor> monitor_control() const { return _monitor_control; }
boost::shared_ptr<Send> internal_send_for (boost::shared_ptr<const Route> target) const; boost::shared_ptr<Send> internal_send_for (boost::shared_ptr<const Route> target) const;
void add_internal_return (); void add_internal_return ();
BufferSet* get_return_buffer () const; void add_send_to_internal_return (InternalSend *);
void release_return_buffer () const; void remove_send_from_internal_return (InternalSend *);
void listen_position_changed (); void listen_position_changed ();
boost::shared_ptr<CapturingProcessor> add_export_point(/* Add some argument for placement later */); boost::shared_ptr<CapturingProcessor> add_export_point(/* Add some argument for placement later */);

View File

@@ -23,6 +23,7 @@
#include "ardour/internal_return.h" #include "ardour/internal_return.h"
#include "ardour/mute_master.h" #include "ardour/mute_master.h"
#include "ardour/session.h" #include "ardour/session.h"
#include "ardour/internal_send.h"
using namespace std; using namespace std;
using namespace ARDOUR; using namespace ARDOUR;
@@ -31,7 +32,6 @@ PBD::Signal1<void, pframes_t> InternalReturn::CycleStart;
InternalReturn::InternalReturn (Session& s) InternalReturn::InternalReturn (Session& s)
: Return (s, true) : Return (s, true)
, user_count (0)
{ {
CycleStart.connect_same_thread (*this, boost::bind (&InternalReturn::cycle_start, this, _1)); CycleStart.connect_same_thread (*this, boost::bind (&InternalReturn::cycle_start, this, _1));
_display_to_user = false; _display_to_user = false;
@@ -44,14 +44,14 @@ InternalReturn::run (BufferSet& bufs, framepos_t /*start_frame*/, framepos_t /*e
return; return;
} }
/* no lock here, just atomic fetch */ /* _sends is only modified with the process lock held, so this is ok, I think */
if (g_atomic_int_get(&user_count) == 0) { for (list<InternalSend*>::iterator i = _sends.begin(); i != _sends.end(); ++i) {
/* nothing to do - nobody is feeding us anything */ if ((*i)->active ()) {
return; bufs.merge_from ((*i)->get_buffers(), nframes);
}
} }
bufs.merge_from (buffers, nframes);
_active = _pending_active; _active = _pending_active;
} }
@@ -77,34 +77,26 @@ InternalReturn::allocate_buffers (pframes_t nframes)
buffers.set_count (_configured_input); buffers.set_count (_configured_input);
} }
BufferSet* void
InternalReturn::get_buffers () InternalReturn::add_send (InternalSend* send)
{ {
Glib::Mutex::Lock lm (_session.engine().process_lock()); Glib::Mutex::Lock lm (_session.engine().process_lock());
/* use of g_atomic here is just for code consistency - its protected by the lock _sends.push_back (send);
for writing.
*/
g_atomic_int_inc (&user_count);
return &buffers;
} }
void void
InternalReturn::release_buffers () InternalReturn::remove_send (InternalSend* send)
{ {
Glib::Mutex::Lock lm (_session.engine().process_lock()); Glib::Mutex::Lock lm (_session.engine().process_lock());
if (user_count) { _sends.remove (send);
/* use of g_atomic here is just for code consistency - its protected by the lock /* XXX: do we need to remove the connection to this send from _send_drop_references_connections ? */
for writing.
*/
(void) g_atomic_int_dec_and_test (&user_count);
}
} }
void void
InternalReturn::cycle_start (pframes_t nframes) InternalReturn::cycle_start (pframes_t nframes)
{ {
/* called from process cycle - no lock necessary */ /* called from process cycle - no lock necessary */
if (user_count) { if (!_sends.empty ()) {
/* don't bother with this if nobody is going to feed us anything */ /* don't bother with this if nobody is going to feed us anything */
buffers.silence (nframes, 0); buffers.silence (nframes, 0);
} }
@@ -138,3 +130,4 @@ InternalReturn::can_support_io_configuration (const ChanCount& in, ChanCount& ou
return true; return true;
} }

View File

@@ -34,7 +34,6 @@ using namespace std;
InternalSend::InternalSend (Session& s, boost::shared_ptr<Pannable> p, boost::shared_ptr<MuteMaster> mm, boost::shared_ptr<Route> sendto, Delivery::Role role) InternalSend::InternalSend (Session& s, boost::shared_ptr<Pannable> p, boost::shared_ptr<MuteMaster> mm, boost::shared_ptr<Route> sendto, Delivery::Role role)
: Send (s, p, mm, role) : Send (s, p, mm, role)
, target (0)
{ {
if (sendto) { if (sendto) {
if (use_target (sendto)) { if (use_target (sendto)) {
@@ -46,18 +45,20 @@ InternalSend::InternalSend (Session& s, boost::shared_ptr<Pannable> p, boost::sh
InternalSend::~InternalSend () InternalSend::~InternalSend ()
{ {
if (_send_to) { if (_send_to) {
_send_to->release_return_buffer (); _send_to->remove_send_from_internal_return (this);
} }
} }
int int
InternalSend::use_target (boost::shared_ptr<Route> sendto) InternalSend::use_target (boost::shared_ptr<Route> sendto)
{ {
if (_send_to) {
_send_to->remove_send_from_internal_return (this);
}
_send_to = sendto; _send_to = sendto;
if ((target = _send_to->get_return_buffer ()) == 0) { _send_to->add_send_to_internal_return (this);
return -1;
}
set_name (sendto->name()); set_name (sendto->name());
_send_to_id = _send_to->id(); _send_to_id = _send_to->id();
@@ -74,7 +75,6 @@ InternalSend::use_target (boost::shared_ptr<Route> sendto)
void void
InternalSend::send_to_going_away () InternalSend::send_to_going_away ()
{ {
target = 0;
target_connections.drop_connections (); target_connections.drop_connections ();
_send_to.reset (); _send_to.reset ();
_send_to_id = "0"; _send_to_id = "0";
@@ -83,7 +83,7 @@ InternalSend::send_to_going_away ()
void void
InternalSend::run (BufferSet& bufs, framepos_t start_frame, framepos_t end_frame, pframes_t nframes, bool) InternalSend::run (BufferSet& bufs, framepos_t start_frame, framepos_t end_frame, pframes_t nframes, bool)
{ {
if ((!_active && !_pending_active) || !target || !_send_to) { if ((!_active && !_pending_active) || !_send_to) {
_meter->reset (); _meter->reset ();
return; return;
} }
@@ -138,9 +138,7 @@ InternalSend::run (BufferSet& bufs, framepos_t start_frame, framepos_t end_frame
} }
} }
/* deliver to target */ /* target will pick up our output when it is ready */
target->merge_from (mixbufs, nframes);
out: out:
_active = _pending_active; _active = _pending_active;
@@ -150,11 +148,6 @@ int
InternalSend::set_block_size (pframes_t nframes) InternalSend::set_block_size (pframes_t nframes)
{ {
mixbufs.ensure_buffers (_configured_input, nframes); mixbufs.ensure_buffers (_configured_input, nframes);
/* ensure that our target can cope with us merging this many frames to it */
if (target) {
target->ensure_buffers (_configured_input, nframes);
}
return 0; return 0;
} }

View File

@@ -2465,8 +2465,8 @@ Route::add_internal_return ()
} }
} }
BufferSet* void
Route::get_return_buffer () const Route::add_send_to_internal_return (InternalSend* send)
{ {
Glib::RWLock::ReaderLock rm (_processor_lock); Glib::RWLock::ReaderLock rm (_processor_lock);
@@ -2474,16 +2474,13 @@ Route::get_return_buffer () const
boost::shared_ptr<InternalReturn> d = boost::dynamic_pointer_cast<InternalReturn>(*x); boost::shared_ptr<InternalReturn> d = boost::dynamic_pointer_cast<InternalReturn>(*x);
if (d) { if (d) {
BufferSet* bs = d->get_buffers (); return d->add_send (send);
return bs;
} }
} }
return 0;
} }
void void
Route::release_return_buffer () const Route::remove_send_from_internal_return (InternalSend* send)
{ {
Glib::RWLock::ReaderLock rm (_processor_lock); Glib::RWLock::ReaderLock rm (_processor_lock);
@@ -2491,7 +2488,7 @@ Route::release_return_buffer () const
boost::shared_ptr<InternalReturn> d = boost::dynamic_pointer_cast<InternalReturn>(*x); boost::shared_ptr<InternalReturn> d = boost::dynamic_pointer_cast<InternalReturn>(*x);
if (d) { if (d) {
return d->release_buffers (); return d->remove_send (send);
} }
} }
} }