
Make InternalReturns collect their sends' data in the return's ::run(), rather than having each send merge its data into the InternalReturn in its own ::run(). This makes internal send/return thread-safe, so that N routes can send to 1 whilst they are being run in parallel process threads.

git-svn-id: svn://localhost/ardour2/branches/3.0@8904 d708f5d6-7413-0410-9779-e7cbd77b26cf
Carl Hetherington 2011-02-20 00:55:52 +00:00
parent 13232d03f3
commit 8f0750cc7e
6 changed files with 44 additions and 53 deletions
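
A minimal sketch of the pattern this commit moves to (illustrative stand-in types, not the actual Ardour classes; the real code is in the diffs below). Before the change, every send's ::run() called merge_from() on the single BufferSet owned by the target InternalReturn, so N sends running in parallel process threads could write that shared buffer concurrently. After the change, a send's ::run() only fills its own private mixbufs; the return's ::run() then walks its list of registered sends and merges each send's buffers into its own input, so the shared data is touched by exactly one thread:

#include <list>

/* Illustrative stand-ins only; the real ARDOUR::BufferSet, InternalSend and
   InternalReturn do much more than this. */

struct BufferSet {
	void merge_from (BufferSet const &, unsigned /*nframes*/) {
		/* sum the other set's samples into this one */
	}
};

struct SketchSend {
	BufferSet mixbufs;   /* private to this send: written only by its own run() */
	bool active;

	SketchSend () : active (true) {}

	void run (unsigned /*nframes*/) {
		/* mix this route's output into mixbufs; no shared state is
		   touched, so any number of sends may run concurrently */
	}

	BufferSet const & get_buffers () const { return mixbufs; }
};

struct SketchReturn {
	std::list<SketchSend*> sends;   /* mutated only with the process lock held */

	void run (BufferSet& bufs, unsigned nframes) {
		/* sole reader in the process cycle: the one thread running the
		   return pulls each send's private buffers, so no lock is needed */
		for (std::list<SketchSend*>::iterator i = sends.begin(); i != sends.end(); ++i) {
			if ((*i)->active) {
				bufs.merge_from ((*i)->get_buffers (), nframes);
			}
		}
	}
};

The send list itself is only mutated (add_send/remove_send) with the engine's process lock held, and the process graph presumably orders a send's route before the return's route, so the return reads each send's buffers only after they have been written for that cycle.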

libs/ardour/ardour/internal_return.h

@@ -27,12 +27,14 @@
 namespace ARDOUR {
 
+class InternalSend;
+
 class InternalReturn : public Return
 {
   public:
 	InternalReturn (Session&);
 
 	XMLNode& state(bool full);
 	XMLNode& get_state(void);
 	int set_state(const XMLNode&, int version);
@@ -41,14 +43,16 @@ XMLNode& state(bool full);
 	bool can_support_io_configuration (const ChanCount& in, ChanCount& out) const;
 	int set_block_size (pframes_t);
-	BufferSet* get_buffers();
-	void release_buffers();
+	void add_send (InternalSend *);
+	void remove_send (InternalSend *);
 
 	static PBD::Signal1<void, pframes_t> CycleStart;
 
   private:
 	BufferSet buffers;
-	gint user_count; /* atomic */
+
+	/** sends that we are receiving data from */
+	std::list<InternalSend*> _sends;
 
 	void allocate_buffers (pframes_t);
 	void cycle_start (pframes_t);
 };

libs/ardour/ardour/internal_send.h

@@ -48,9 +48,12 @@ class InternalSend : public Send
 	boost::shared_ptr<Route> target_route() const { return _send_to; }
 	const PBD::ID& target_id() const { return _send_to_id; }
 
+	BufferSet const & get_buffers () const {
+		return mixbufs;
+	}
+
   private:
-	BufferSet  mixbufs;
-	BufferSet* target;
+	BufferSet mixbufs;
 	boost::shared_ptr<Route> _send_to;
 	PBD::ID _send_to_id;
 	PBD::ScopedConnection connect_c;

libs/ardour/ardour/route.h

@@ -63,6 +63,7 @@ class InternalReturn;
 class MonitorProcessor;
 class Pannable;
 class CapturingProcessor;
+class InternalSend;
 
 class Route : public SessionObject, public Automatable, public RouteGroupMember, public GraphNode
 {
@@ -218,8 +219,8 @@ class Route : public SessionObject, public Automatable, public RouteGroupMember,
 	boost::shared_ptr<MonitorProcessor> monitor_control() const { return _monitor_control; }
 	boost::shared_ptr<Send> internal_send_for (boost::shared_ptr<const Route> target) const;
 	void add_internal_return ();
-	BufferSet* get_return_buffer () const;
-	void release_return_buffer () const;
+	void add_send_to_internal_return (InternalSend *);
+	void remove_send_from_internal_return (InternalSend *);
 	void listen_position_changed ();
 	boost::shared_ptr<CapturingProcessor> add_export_point(/* Add some argument for placement later */);

libs/ardour/internal_return.cc

@@ -23,6 +23,7 @@
 #include "ardour/internal_return.h"
 #include "ardour/mute_master.h"
 #include "ardour/session.h"
+#include "ardour/internal_send.h"
 
 using namespace std;
 using namespace ARDOUR;
@@ -31,7 +32,6 @@ PBD::Signal1<void, pframes_t> InternalReturn::CycleStart;
 
 InternalReturn::InternalReturn (Session& s)
 	: Return (s, true)
-	, user_count (0)
 {
 	CycleStart.connect_same_thread (*this, boost::bind (&InternalReturn::cycle_start, this, _1));
 	_display_to_user = false;
@@ -44,14 +44,14 @@ InternalReturn::run (BufferSet& bufs, framepos_t /*start_frame*/, framepos_t /*e
 		return;
 	}
 
-	/* no lock here, just atomic fetch */
+	/* _sends is only modified with the process lock held, so this is ok, I think */
 
-	if (g_atomic_int_get(&user_count) == 0) {
-		/* nothing to do - nobody is feeding us anything */
-		return;
+	for (list<InternalSend*>::iterator i = _sends.begin(); i != _sends.end(); ++i) {
+		if ((*i)->active ()) {
+			bufs.merge_from ((*i)->get_buffers(), nframes);
+		}
 	}
 
-	bufs.merge_from (buffers, nframes);
-
 	_active = _pending_active;
 }
@@ -77,34 +77,26 @@ InternalReturn::allocate_buffers (pframes_t nframes)
 	buffers.set_count (_configured_input);
 }
 
-BufferSet*
-InternalReturn::get_buffers ()
+void
+InternalReturn::add_send (InternalSend* send)
 {
 	Glib::Mutex::Lock lm (_session.engine().process_lock());
-	/* use of g_atomic here is just for code consistency - its protected by the lock
-	   for writing.
-	*/
-	g_atomic_int_inc (&user_count);
-	return &buffers;
+	_sends.push_back (send);
 }
 
 void
-InternalReturn::release_buffers ()
+InternalReturn::remove_send (InternalSend* send)
 {
 	Glib::Mutex::Lock lm (_session.engine().process_lock());
-	if (user_count) {
-		/* use of g_atomic here is just for code consistency - its protected by the lock
-		   for writing.
-		*/
-		(void) g_atomic_int_dec_and_test (&user_count);
-	}
+	_sends.remove (send);
+	/* XXX: do we need to remove the connection to this send from _send_drop_references_connections ? */
 }
 
 void
 InternalReturn::cycle_start (pframes_t nframes)
 {
 	/* called from process cycle - no lock necessary */
-	if (user_count) {
+	if (!_sends.empty ()) {
 		/* don't bother with this if nobody is going to feed us anything */
 		buffers.silence (nframes, 0);
 	}
@@ -138,3 +130,4 @@ InternalReturn::can_support_io_configuration (const ChanCount& in, ChanCount& ou
 	return true;
 }
+

libs/ardour/internal_send.cc

@@ -34,7 +34,6 @@ using namespace std;
 
 InternalSend::InternalSend (Session& s, boost::shared_ptr<Pannable> p, boost::shared_ptr<MuteMaster> mm, boost::shared_ptr<Route> sendto, Delivery::Role role)
 	: Send (s, p, mm, role)
-	, target (0)
 {
 	if (sendto) {
 		if (use_target (sendto)) {
@@ -46,18 +45,20 @@ InternalSend::InternalSend (Session& s, boost::shared_ptr<Pannable> p, boost::sh
 InternalSend::~InternalSend ()
 {
 	if (_send_to) {
-		_send_to->release_return_buffer ();
+		_send_to->remove_send_from_internal_return (this);
 	}
 }
 
 int
 InternalSend::use_target (boost::shared_ptr<Route> sendto)
 {
+	if (_send_to) {
+		_send_to->remove_send_from_internal_return (this);
+	}
+
 	_send_to = sendto;
 
-	if ((target = _send_to->get_return_buffer ()) == 0) {
-		return -1;
-	}
+	_send_to->add_send_to_internal_return (this);
 
 	set_name (sendto->name());
 	_send_to_id = _send_to->id();
@@ -74,7 +75,6 @@ InternalSend::use_target (boost::shared_ptr<Route> sendto)
 void
 InternalSend::send_to_going_away ()
 {
-	target = 0;
 	target_connections.drop_connections ();
 	_send_to.reset ();
 	_send_to_id = "0";
@@ -83,7 +83,7 @@ InternalSend::send_to_going_away ()
 void
 InternalSend::run (BufferSet& bufs, framepos_t start_frame, framepos_t end_frame, pframes_t nframes, bool)
 {
-	if ((!_active && !_pending_active) || !target || !_send_to) {
+	if ((!_active && !_pending_active) || !_send_to) {
 		_meter->reset ();
 		return;
 	}
@@ -138,9 +138,7 @@ InternalSend::run (BufferSet& bufs, framepos_t start_frame, framepos_t end_frame
 		}
 	}
 
-	/* deliver to target */
-	target->merge_from (mixbufs, nframes);
+	/* target will pick up our output when it is ready */
 
   out:
 	_active = _pending_active;
@@ -150,11 +148,6 @@ int
 InternalSend::set_block_size (pframes_t nframes)
 {
 	mixbufs.ensure_buffers (_configured_input, nframes);
-
-	/* ensure that our target can cope with us merging this many frames to it */
-	if (target) {
-		target->ensure_buffers (_configured_input, nframes);
-	}
 	return 0;
 }

libs/ardour/route.cc

@@ -2465,8 +2465,8 @@ Route::add_internal_return ()
 	}
 }
 
-BufferSet*
-Route::get_return_buffer () const
+void
+Route::add_send_to_internal_return (InternalSend* send)
 {
 	Glib::RWLock::ReaderLock rm (_processor_lock);
@@ -2474,16 +2474,13 @@ Route::get_return_buffer () const
 		boost::shared_ptr<InternalReturn> d = boost::dynamic_pointer_cast<InternalReturn>(*x);
 
 		if (d) {
-			BufferSet* bs = d->get_buffers ();
-			return bs;
+			return d->add_send (send);
 		}
 	}
-
-	return 0;
 }
 
 void
-Route::release_return_buffer () const
+Route::remove_send_from_internal_return (InternalSend* send)
 {
 	Glib::RWLock::ReaderLock rm (_processor_lock);
@@ -2491,7 +2488,7 @@ Route::release_return_buffer () const
 		boost::shared_ptr<InternalReturn> d = boost::dynamic_pointer_cast<InternalReturn>(*x);
 
 		if (d) {
-			return d->release_buffers ();
+			return d->remove_send (send);
 		}
 	}
 }