
Thin out qm-dsp code: no threading

Robin Gareus 2016-10-06 00:51:32 +02:00
parent ee2a1b7bea
commit a543ae329c
8 changed files with 0 additions and 1617 deletions


@@ -17,9 +17,6 @@ maths/pca.c -- Fionn Murtagh, from StatLib; with permission
maths/Polyfit.h -- Allen Miller, David J Taylor and others; also for
Delphi in the JEDI Math Library, under the Mozilla Public License
thread/BlockAllocator.h -- derived from FSB Allocator by Juha Nieminen,
under BSD-style license
See individual files for further authorship details.

dsp/rateconversion/Resampler.cpp

@@ -1,416 +0,0 @@
/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
/*
QM DSP Library
Centre for Digital Music, Queen Mary, University of London.
This file by Chris Cannam.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version. See the file
COPYING included with this distribution for more information.
*/
#include "Resampler.h"
#include "maths/MathUtilities.h"
#include "base/KaiserWindow.h"
#include "base/SincWindow.h"
#include "thread/Thread.h"
#include <iostream>
#include <vector>
#include <map>
#include <cassert>
#include <cmath>
using std::vector;
using std::map;
using std::cerr;
using std::endl;
//#define DEBUG_RESAMPLER 1
//#define DEBUG_RESAMPLER_VERBOSE 1
Resampler::Resampler(int sourceRate, int targetRate) :
m_sourceRate(sourceRate),
m_targetRate(targetRate)
{
initialise(100, 0.02);
}
Resampler::Resampler(int sourceRate, int targetRate,
double snr, double bandwidth) :
m_sourceRate(sourceRate),
m_targetRate(targetRate)
{
initialise(snr, bandwidth);
}
Resampler::~Resampler()
{
delete[] m_phaseData;
}
// peakToPole -> length -> beta -> window
static map<double, map<int, map<double, vector<double> > > >
knownFilters;
static Mutex
knownFilterMutex;
void
Resampler::initialise(double snr, double bandwidth)
{
int higher = std::max(m_sourceRate, m_targetRate);
int lower = std::min(m_sourceRate, m_targetRate);
m_gcd = MathUtilities::gcd(lower, higher);
m_peakToPole = higher / m_gcd;
if (m_targetRate < m_sourceRate) {
// antialiasing filter, should be slightly below nyquist
m_peakToPole = m_peakToPole / (1.0 - bandwidth/2.0);
}
KaiserWindow::Parameters params =
KaiserWindow::parametersForBandwidth(snr, bandwidth, higher / m_gcd);
params.length =
(params.length % 2 == 0 ? params.length + 1 : params.length);
params.length =
(params.length > 200001 ? 200001 : params.length);
m_filterLength = params.length;
vector<double> filter;
knownFilterMutex.lock();
if (knownFilters[m_peakToPole][m_filterLength].find(params.beta) ==
knownFilters[m_peakToPole][m_filterLength].end()) {
KaiserWindow kw(params);
SincWindow sw(m_filterLength, m_peakToPole * 2);
filter = vector<double>(m_filterLength, 0.0);
for (int i = 0; i < m_filterLength; ++i) filter[i] = 1.0;
sw.cut(filter.data());
kw.cut(filter.data());
knownFilters[m_peakToPole][m_filterLength][params.beta] = filter;
}
filter = knownFilters[m_peakToPole][m_filterLength][params.beta];
knownFilterMutex.unlock();
int inputSpacing = m_targetRate / m_gcd;
int outputSpacing = m_sourceRate / m_gcd;
#ifdef DEBUG_RESAMPLER
cerr << "resample " << m_sourceRate << " -> " << m_targetRate
<< ": inputSpacing " << inputSpacing << ", outputSpacing "
<< outputSpacing << ": filter length " << m_filterLength
<< endl;
#endif
// Now we have a filter of (odd) length flen in which the lower
// sample rate corresponds to every n'th point and the higher rate
// to every m'th where n and m are higher and lower rates divided
// by their gcd respectively. So if x coordinates are on the same
// scale as our filter resolution, then source sample i is at i *
// (targetRate / gcd) and target sample j is at j * (sourceRate /
// gcd).
// To reconstruct a single target sample, we want a buffer (real
// or virtual) of flen values formed of source samples spaced at
// intervals of (targetRate / gcd), in our example case 3. This
// is initially formed with the first sample at the filter peak.
//
// 0 0 0 0 a 0 0 b 0
//
// and of course we have our filter
//
// f1 f2 f3 f4 f5 f6 f7 f8 f9
//
// We take the sum of products of non-zero values from this buffer
// with corresponding values in the filter
//
// a * f5 + b * f8
//
// Then we drop (sourceRate / gcd) values, in our example case 4,
// from the start of the buffer and fill until it has flen values
// again
//
// a 0 0 b 0 0 c 0 0
//
// repeat to reconstruct the next target sample
//
// a * f1 + b * f4 + c * f7
//
// and so on.
//
// Above I said the buffer could be "real or virtual" -- ours is
// virtual. We don't actually store all the zero spacing values,
// except for padding at the start; normally we store only the
// values that actually came from the source stream, along with a
// phase value that tells us how many virtual zeroes there are at
// the start of the virtual buffer. So the two examples above are
//
// 0 a b [ with phase 1 ]
// a b c [ with phase 0 ]
//
// Having thus broken down the buffer so that only the elements we
// need to multiply are present, we can also unzip the filter into
// every-nth-element subsets at each phase, allowing us to do the
// filter multiplication as a simple vector multiply. That is, rather
// than store
//
// f1 f2 f3 f4 f5 f6 f7 f8 f9
//
// we store separately
//
// f1 f4 f7
// f2 f5 f8
// f3 f6 f9
//
// Each time we complete a multiply-and-sum, we need to work out
// how many (real) samples to drop from the start of our buffer,
// and how many to add at the end of it for the next multiply. We
// know we want to drop enough real samples to move along by one
// computed output sample, which is our outputSpacing number of
// virtual buffer samples. Depending on the relationship between
// input and output spacings, this may mean dropping several real
// samples, one real sample, or none at all (and simply moving to
// a different "phase").
m_phaseData = new Phase[inputSpacing];
for (int phase = 0; phase < inputSpacing; ++phase) {
Phase p;
p.nextPhase = phase - outputSpacing;
while (p.nextPhase < 0) p.nextPhase += inputSpacing;
p.nextPhase %= inputSpacing;
p.drop = int(ceil(std::max(0.0, double(outputSpacing - phase))
/ inputSpacing));
int filtZipLength = int(ceil(double(m_filterLength - phase)
/ inputSpacing));
for (int i = 0; i < filtZipLength; ++i) {
p.filter.push_back(filter[i * inputSpacing + phase]);
}
m_phaseData[phase] = p;
}
#ifdef DEBUG_RESAMPLER
int cp = 0;
int totDrop = 0;
for (int i = 0; i < inputSpacing; ++i) {
cerr << "phase = " << cp << ", drop = " << m_phaseData[cp].drop
<< ", filter length = " << m_phaseData[cp].filter.size()
<< ", next phase = " << m_phaseData[cp].nextPhase << endl;
totDrop += m_phaseData[cp].drop;
cp = m_phaseData[cp].nextPhase;
}
cerr << "total drop = " << totDrop << endl;
#endif
// The May implementation of this uses a pull model -- we ask the
// resampler for a certain number of output samples, and it asks
// its source stream for as many as it needs to calculate
// those. This means (among other things) that the source stream
// can be asked for enough samples up-front to fill the buffer
// before the first output sample is generated.
//
// In this implementation we're using a push model in which a
// certain number of source samples is provided and we're asked
// for as many output samples as that makes available. But we
// can't return any samples from the beginning until half the
// filter length has been provided as input. This means we must
// either return a very variable number of samples (none at all
// until the filter fills, then half the filter length at once) or
// else have a lengthy declared latency on the output. We do the
// latter. (What do other implementations do?)
//
// We want to make sure the first "real" sample will eventually be
// aligned with the centre sample in the filter (it's tidier, and
// easier to do diagnostic calculations that way). So we need to
// pick the initial phase and buffer fill accordingly.
//
// Example: if the inputSpacing is 2, outputSpacing is 3, and
// filter length is 7,
//
// x x x x a b c ... input samples
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 ...
// i j k l ... output samples
// [--------|--------] <- filter with centre mark
//
// Let h be the index of the centre mark, here 3 (generally
// int(filterLength/2) for odd-length filters).
//
// The smallest n such that h + n * outputSpacing > filterLength
// is 2 (that is, ceil((filterLength - h) / outputSpacing)), and
// (h + 2 * outputSpacing) % inputSpacing == 1, so the initial
// phase is 1.
//
// To achieve our n, we need to pre-fill the "virtual" buffer with
// 4 zero samples: the x's above. This is int((h + n *
// outputSpacing) / inputSpacing). It's the phase that makes this
// buffer get dealt with in such a way as to give us an effective
// index for sample a of 9 rather than 8 or 10 or whatever.
//
// This gives us output latency of 2 (== n), i.e. output samples i
// and j will appear before the one in which input sample a is at
// the centre of the filter.
int h = int(m_filterLength / 2);
int n = ceil(double(m_filterLength - h) / outputSpacing);
m_phase = (h + n * outputSpacing) % inputSpacing;
int fill = (h + n * outputSpacing) / inputSpacing;
m_latency = n;
m_buffer = vector<double>(fill, 0);
m_bufferOrigin = 0;
#ifdef DEBUG_RESAMPLER
cerr << "initial phase " << m_phase << " (as " << (m_filterLength/2) << " % " << inputSpacing << ")"
<< ", latency " << m_latency << endl;
#endif
}
double
Resampler::reconstructOne()
{
Phase &pd = m_phaseData[m_phase];
double v = 0.0;
int n = pd.filter.size();
assert(n + m_bufferOrigin <= (int)m_buffer.size());
const double *const __restrict__ buf = m_buffer.data() + m_bufferOrigin;
const double *const __restrict__ filt = pd.filter.data();
for (int i = 0; i < n; ++i) {
// NB gcc can only vectorize this with -ffast-math
v += buf[i] * filt[i];
}
m_bufferOrigin += pd.drop;
m_phase = pd.nextPhase;
return v;
}
int
Resampler::process(const double *src, double *dst, int n)
{
for (int i = 0; i < n; ++i) {
m_buffer.push_back(src[i]);
}
int maxout = int(ceil(double(n) * m_targetRate / m_sourceRate));
int outidx = 0;
#ifdef DEBUG_RESAMPLER
cerr << "process: buf siz " << m_buffer.size() << " filt siz for phase " << m_phase << " " << m_phaseData[m_phase].filter.size() << endl;
#endif
double scaleFactor = (double(m_targetRate) / m_gcd) / m_peakToPole;
while (outidx < maxout &&
m_buffer.size() >= m_phaseData[m_phase].filter.size() + m_bufferOrigin) {
dst[outidx] = scaleFactor * reconstructOne();
outidx++;
}
m_buffer = vector<double>(m_buffer.begin() + m_bufferOrigin, m_buffer.end());
m_bufferOrigin = 0;
return outidx;
}
vector<double>
Resampler::process(const double *src, int n)
{
int maxout = int(ceil(double(n) * m_targetRate / m_sourceRate));
vector<double> out(maxout, 0.0);
int got = process(src, out.data(), n);
assert(got <= maxout);
if (got < maxout) out.resize(got);
return out;
}
vector<double>
Resampler::resample(int sourceRate, int targetRate, const double *data, int n)
{
Resampler r(sourceRate, targetRate);
int latency = r.getLatency();
// latency is the output latency. We need to provide enough
// padding input samples at the end of input to guarantee at
// *least* the latency's worth of output samples. that is,
int inputPad = int(ceil((double(latency) * sourceRate) / targetRate));
// that means we are providing this much input in total:
int n1 = n + inputPad;
// and obtaining this much output in total:
int m1 = int(ceil((double(n1) * targetRate) / sourceRate));
// in order to return this much output to the user:
int m = int(ceil((double(n) * targetRate) / sourceRate));
#ifdef DEBUG_RESAMPLER
cerr << "n = " << n << ", sourceRate = " << sourceRate << ", targetRate = " << targetRate << ", m = " << m << ", latency = " << latency << ", inputPad = " << inputPad << ", m1 = " << m1 << ", n1 = " << n1 << ", n1 - n = " << n1 - n << endl;
#endif
vector<double> pad(n1 - n, 0.0);
vector<double> out(m1 + 1, 0.0);
int gotData = r.process(data, out.data(), n);
int gotPad = r.process(pad.data(), out.data() + gotData, pad.size());
int got = gotData + gotPad;
#ifdef DEBUG_RESAMPLER
cerr << "resample: " << n << " in, " << pad.size() << " padding, " << got << " out (" << gotData << " data, " << gotPad << " padding, latency = " << latency << ")" << endl;
#endif
#ifdef DEBUG_RESAMPLER_VERBOSE
int printN = 50;
cerr << "first " << printN << " in:" << endl;
for (int i = 0; i < printN && i < n; ++i) {
if (i % 5 == 0) cerr << endl << i << "... ";
cerr << data[i] << " ";
}
cerr << endl;
#endif
int toReturn = got - latency;
if (toReturn > m) toReturn = m;
vector<double> sliced(out.begin() + latency,
out.begin() + latency + toReturn);
#ifdef DEBUG_RESAMPLER_VERBOSE
cerr << "first " << printN << " out (after latency compensation), length " << sliced.size() << ":";
for (int i = 0; i < printN && i < sliced.size(); ++i) {
if (i % 5 == 0) cerr << endl << i << "... ";
cerr << sliced[i] << " ";
}
cerr << endl;
#endif
return sliced;
}

dsp/rateconversion/Resampler.h

@@ -1,102 +0,0 @@
/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
/*
QM DSP Library
Centre for Digital Music, Queen Mary, University of London.
This file by Chris Cannam.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version. See the file
COPYING included with this distribution for more information.
*/
#ifndef RESAMPLER_H
#define RESAMPLER_H
#include <vector>
/**
* Resampler resamples a stream from one integer sample rate to
* another (arbitrary) rate, using a kaiser-windowed sinc filter. The
* results and performance are pretty similar to libraries such as
* libsamplerate, though this implementation does not support
* time-varying ratios (the ratio is fixed on construction).
*
* See also Decimator, which is faster and rougher but supports only
* power-of-two downsampling factors.
*/
class Resampler
{
public:
/**
* Construct a Resampler to resample from sourceRate to
* targetRate.
*/
Resampler(int sourceRate, int targetRate);
/**
* Construct a Resampler to resample from sourceRate to
* targetRate, using the given filter parameters.
*/
Resampler(int sourceRate, int targetRate,
double snr, double bandwidth);
virtual ~Resampler();
/**
* Read n input samples from src and write resampled data to
* dst. The return value is the number of samples written, which
* will be no more than ceil((n * targetRate) / sourceRate). The
* caller must ensure the dst buffer has enough space for the
* samples returned.
*/
int process(const double *src, double *dst, int n);
/**
* Read n input samples from src and return resampled data by
* value.
*/
std::vector<double> process(const double *src, int n);
/**
* Return the number of samples of latency at the output due to
* the filter. (That is, the output will be delayed by this number
* of samples relative to the input.)
*/
int getLatency() const { return m_latency; }
/**
* Carry out a one-off resample of a single block of n
* samples. The output is latency-compensated.
*/
static std::vector<double> resample
(int sourceRate, int targetRate, const double *data, int n);
private:
int m_sourceRate;
int m_targetRate;
int m_gcd;
int m_filterLength;
int m_bufferLength;
int m_latency;
double m_peakToPole;
struct Phase {
int nextPhase;
std::vector<double> filter;
int drop;
};
Phase *m_phaseData;
int m_phase;
std::vector<double> m_buffer;
int m_bufferOrigin;
void initialise(double, double);
double reconstructOne();
};
#endif
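
For orientation, here is a minimal usage sketch of the class removed above; the rates, the block handling and the helper names resampleBuffer/resampleStream are illustrative assumptions, not part of qm-dsp:

#include "Resampler.h"
#include <vector>

// One-shot conversion of a whole buffer; the static helper already
// compensates for the filter latency.
std::vector<double> resampleBuffer(const std::vector<double> &in)
{
    return Resampler::resample(44100, 48000, in.data(), (int)in.size());
}

// Streaming (push-model) conversion: feed blocks as they arrive, and
// remember that the first getLatency() output samples are filter
// pre-roll rather than signal.
void resampleStream(Resampler &r, const double *block, int n,
                    std::vector<double> &out)
{
    std::vector<double> converted = r.process(block, n);
    out.insert(out.end(), converted.begin(), converted.end());
}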

thread/AsynchronousTask.h

@@ -1,110 +0,0 @@
/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
/*
QM DSP Library
Centre for Digital Music, Queen Mary, University of London.
This file Copyright 2009 QMUL.
*/
#ifndef _ASYNCHRONOUS_TASK_H_
#define _ASYNCHRONOUS_TASK_H_
#include "Thread.h"
#include <iostream>
/**
* AsynchronousTask provides a thread pattern implementation for
* threads which are used to perform a series of similar operations in
* parallel with other threads of the same type.
*
* For example, a thread used to calculate FFTs of a particular block
* size in the context of a class that needs to calculate many block
* sizes of FFT at once may be a candidate for an AsynchronousTask.
*
* The general use pattern is:
*
* caller -> request thread A calculate something
* caller -> request thread B calculate something
* caller -> request thread C calculate something
* caller -> wait for threads A, B, and C
*
* Here threads A, B, and C may be AsynchronousTasks. An important
* point is that the caller must be prepared to block when waiting for
* these threads to complete (i.e. they are started asynchronously,
* but testing for completion is synchronous).
*/
class AsynchronousTask : public Thread
{
public:
AsynchronousTask() :
m_todo("AsynchronousTask: task to perform"),
m_done("AsynchronousTask: task complete"),
m_inTask(false),
m_finishing(false)
{
start();
}
virtual ~AsynchronousTask()
{
m_todo.lock();
m_finishing = true;
m_todo.signal();
m_todo.unlock();
wait();
}
// Subclass must provide methods to request task and obtain
// results, which the caller calls. The method that requests a
// new task should set up any internal state and call startTask(),
// which then calls back on the subclass implementation of
// performTask from within its work thread. The method that
// obtains results should call awaitTask() and then return any
// results from internal state.
protected:
void startTask() {
m_done.lock();
m_todo.lock();
m_inTask = true;
m_todo.signal();
m_todo.unlock();
}
void awaitTask() {
m_done.wait();
m_done.unlock();
}
virtual void performTask() = 0;
private:
virtual void run() {
m_todo.lock();
while (1) {
while (!m_inTask && !m_finishing) {
m_todo.wait();
}
if (m_finishing) {
m_done.lock();
m_inTask = false;
m_done.signal();
m_done.unlock();
break;
}
performTask();
m_done.lock();
m_inTask = false;
m_done.signal();
m_done.unlock();
}
m_todo.unlock();
}
Condition m_todo;
Condition m_done;
bool m_inTask;
bool m_finishing;
};
#endif
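
The class comment above describes the request/perform/await pattern in prose; a minimal hypothetical subclass might look like the following (SumTask, its members and the include path are assumptions for illustration only):

#include "AsynchronousTask.h"

// Hypothetical worker: sums a block of values in its own thread while
// the caller is free to do other work between requestSum() and getSum().
class SumTask : public AsynchronousTask
{
public:
    SumTask() : m_data(0), m_n(0), m_sum(0.0) { }

    void requestSum(const double *data, int n) {
        m_data = data;          // set up internal state first...
        m_n = n;
        startTask();            // ...then hand the work to the task thread
    }

    double getSum() {
        awaitTask();            // blocks until performTask() has completed
        return m_sum;
    }

protected:
    virtual void performTask() {
        m_sum = 0.0;
        for (int i = 0; i < m_n; ++i) m_sum += m_data[i];
    }

private:
    const double *m_data;
    int m_n;
    double m_sum;
};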

thread/BlockAllocator.h

@@ -1,189 +0,0 @@
/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
/*
QM DSP Library
Centre for Digital Music, Queen Mary, University of London.
This file is derived from the FSB Allocator by Juha Nieminen. The
underlying method is unchanged, but the class has been refactored
to permit multiple separate allocators (e.g. one per thread)
rather than use a single global one (and to fit house style).
Copyright (c) 2008 Juha Nieminen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef _BLOCK_ALLOCATOR_H_
#define _BLOCK_ALLOCATOR_H_
#include <cstdlib>
#include <vector>
/**
* BlockAllocator is a simple allocator for fixed-size (usually small)
* chunks of memory. The size of an element is specified in the
* BlockAllocator constructor, and the functions allocate() and
* deallocate() are used to obtain and release a single element at a
* time.
*
* BlockAllocator may be an appropriate class to use in situations
* involving a very large number of allocations and deallocations of
* simple, identical objects across multiple threads (a hard situation
* for a generic system malloc implementation to handle well). Retain
* one BlockAllocator per thread (the class itself is not
* thread-safe), and ensure that each thread uses its own allocator
* exclusively.
*
* BlockAllocator is based on Juha Nieminen's more general
* FSBAllocator.
*/
class BlockAllocator
{
public:
typedef std::size_t data_t;
BlockAllocator(int elementSize) : m_sz(elementSize) { }
void *
allocate()
{
if (m_freelist.empty()) {
m_freelist.push_back(m_blocks.data.size());
m_blocks.data.push_back(Block(this));
}
const data_t index = m_freelist.back();
Block &block = m_blocks.data[index];
void *retval = block.allocate(index);
if (block.isFull()) m_freelist.pop_back();
return retval;
}
void
deallocate(void *ptr)
{
if (!ptr) return;
data_t *unitPtr = (data_t *)ptr;
const data_t blockIndex = unitPtr[elementSizeInDataUnits()];
Block& block = m_blocks.data[blockIndex];
if (block.isFull()) m_freelist.push_back(blockIndex);
block.deallocate(unitPtr);
}
private:
inline data_t elementsPerBlock() const {
return 512;
}
inline data_t dataSize() const {
return sizeof(data_t);
}
inline data_t elementSizeInDataUnits() const {
return (m_sz + (dataSize() - 1)) / dataSize();
}
inline data_t unitSizeInDataUnits() const {
return elementSizeInDataUnits() + 1;
}
inline data_t blockSizeInDataUnits() const {
return elementsPerBlock() * unitSizeInDataUnits();
}
class Block
{
public:
Block(BlockAllocator *a) :
m_a(a),
m_block(0),
m_firstFreeUnit(data_t(-1)),
m_allocated(0),
m_end(0)
{}
~Block() {
delete[] m_block;
}
bool isFull() const {
return m_allocated == m_a->elementsPerBlock();
}
void clear() {
delete[] m_block;
m_block = 0;
m_firstFreeUnit = data_t(-1);
}
void *allocate(data_t index) {
if (m_firstFreeUnit == data_t(-1)) {
if (!m_block) {
m_block = new data_t[m_a->blockSizeInDataUnits()];
m_end = 0;
}
data_t *retval = m_block + m_end;
m_end += m_a->unitSizeInDataUnits();
retval[m_a->elementSizeInDataUnits()] = index;
++m_allocated;
return retval;
} else {
data_t *retval = m_block + m_firstFreeUnit;
m_firstFreeUnit = *retval;
++m_allocated;
return retval;
}
}
void deallocate(data_t *ptr) {
*ptr = m_firstFreeUnit;
m_firstFreeUnit = ptr - m_block;
if (--m_allocated == 0) clear();
}
private:
const BlockAllocator *m_a;
data_t *m_block;
data_t m_firstFreeUnit;
data_t m_allocated;
data_t m_end;
};
struct Blocks
{
std::vector<Block> data;
Blocks() {
data.reserve(1024);
}
};
const int m_sz;
Blocks m_blocks;
std::vector<data_t> m_freelist;
};
#endif
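
As a usage illustration of the allocator removed above (the Node type and the example() function are invented for this sketch):

#include "BlockAllocator.h"

// A small fixed-size record of the kind BlockAllocator is meant for.
struct Node { double value; Node *next; };

void example()
{
    // One allocator per element type, and per thread -- the class is
    // deliberately not thread-safe.
    BlockAllocator allocator(sizeof(Node));

    Node *n = static_cast<Node *>(allocator.allocate());
    n->value = 1.0;
    n->next = 0;

    allocator.deallocate(n);    // returns the element to the free list
}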

thread/Thread.cpp

@@ -1,643 +0,0 @@
/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
/*
QM DSP Library
Centre for Digital Music, Queen Mary, University of London.
This file copyright Chris Cannam, used with permission.
*/
#include "Thread.h"
#include <iostream>
#include <cstdlib>
#ifdef USE_PTHREADS
#include <sys/time.h>
#include <time.h>
#endif
using std::cerr;
using std::endl;
using std::string;
#ifdef _WIN32
Thread::Thread() :
m_id(0),
m_extant(false)
{
#ifdef DEBUG_THREAD
cerr << "THREAD DEBUG: Created thread object " << this << endl;
#endif
}
Thread::~Thread()
{
#ifdef DEBUG_THREAD
cerr << "THREAD DEBUG: Destroying thread object " << this << ", id " << m_id << endl;
#endif
if (m_extant) {
WaitForSingleObject(m_id, INFINITE);
}
#ifdef DEBUG_THREAD
cerr << "THREAD DEBUG: Destroyed thread object " << this << endl;
#endif
}
void
Thread::start()
{
m_id = CreateThread(NULL, 0, staticRun, this, 0, 0);
if (!m_id) {
cerr << "ERROR: thread creation failed" << endl;
exit(1);
} else {
#ifdef DEBUG_THREAD
cerr << "THREAD DEBUG: Created thread " << m_id << " for thread object " << this << endl;
#endif
m_extant = true;
}
}
void
Thread::wait()
{
if (m_extant) {
#ifdef DEBUG_THREAD
cerr << "THREAD DEBUG: Waiting on thread " << m_id << " for thread object " << this << endl;
#endif
WaitForSingleObject(m_id, INFINITE);
#ifdef DEBUG_THREAD
cerr << "THREAD DEBUG: Waited on thread " << m_id << " for thread object " << this << endl;
#endif
m_extant = false;
}
}
Thread::Id
Thread::id()
{
return m_id;
}
bool
Thread::threadingAvailable()
{
return true;
}
DWORD
Thread::staticRun(LPVOID arg)
{
Thread *thread = static_cast<Thread *>(arg);
#ifdef DEBUG_THREAD
cerr << "THREAD DEBUG: " << (void *)GetCurrentThreadId() << ": Running thread " << thread->m_id << " for thread object " << thread << endl;
#endif
thread->run();
return 0;
}
Mutex::Mutex()
#ifndef NO_THREAD_CHECKS
:
m_lockedBy(-1)
#endif
{
m_mutex = CreateMutex(NULL, FALSE, NULL);
#ifdef DEBUG_MUTEX
cerr << "MUTEX DEBUG: " << (void *)GetCurrentThreadId() << ": Initialised mutex " << &m_mutex << endl;
#endif
}
Mutex::~Mutex()
{
#ifdef DEBUG_MUTEX
cerr << "MUTEX DEBUG: " << (void *)GetCurrentThreadId() << ": Destroying mutex " << &m_mutex << endl;
#endif
CloseHandle(m_mutex);
}
void
Mutex::lock()
{
#ifndef NO_THREAD_CHECKS
DWORD tid = GetCurrentThreadId();
if (m_lockedBy == tid) {
cerr << "ERROR: Deadlock on mutex " << &m_mutex << endl;
}
#endif
#ifdef DEBUG_MUTEX
cerr << "MUTEX DEBUG: " << (void *)tid << ": Want to lock mutex " << &m_mutex << endl;
#endif
WaitForSingleObject(m_mutex, INFINITE);
#ifndef NO_THREAD_CHECKS
m_lockedBy = tid;
#endif
#ifdef DEBUG_MUTEX
cerr << "MUTEX DEBUG: " << (void *)tid << ": Locked mutex " << &m_mutex << endl;
#endif
}
void
Mutex::unlock()
{
#ifndef NO_THREAD_CHECKS
DWORD tid = GetCurrentThreadId();
if (m_lockedBy != tid) {
cerr << "ERROR: Mutex " << &m_mutex << " not owned by unlocking thread" << endl;
return;
}
#endif
#ifdef DEBUG_MUTEX
cerr << "MUTEX DEBUG: " << (void *)tid << ": Unlocking mutex " << &m_mutex << endl;
#endif
#ifndef NO_THREAD_CHECKS
m_lockedBy = -1;
#endif
ReleaseMutex(m_mutex);
}
bool
Mutex::trylock()
{
#ifndef NO_THREAD_CHECKS
DWORD tid = GetCurrentThreadId();
#endif
DWORD result = WaitForSingleObject(m_mutex, 0);
if (result == WAIT_TIMEOUT || result == WAIT_FAILED) {
#ifdef DEBUG_MUTEX
cerr << "MUTEX DEBUG: " << (void *)tid << ": Mutex " << &m_mutex << " unavailable" << endl;
#endif
return false;
} else {
#ifndef NO_THREAD_CHECKS
m_lockedBy = tid;
#endif
#ifdef DEBUG_MUTEX
cerr << "MUTEX DEBUG: " << (void *)tid << ": Locked mutex " << &m_mutex << " (from trylock)" << endl;
#endif
return true;
}
}
Condition::Condition(string name) :
m_locked(false)
#ifdef DEBUG_CONDITION
, m_name(name)
#endif
{
m_mutex = CreateMutex(NULL, FALSE, NULL);
m_condition = CreateEvent(NULL, FALSE, FALSE, NULL);
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)GetCurrentThreadId() << ": Initialised condition " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
}
Condition::~Condition()
{
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)GetCurrentThreadId() << ": Destroying condition " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
if (m_locked) ReleaseMutex(m_mutex);
CloseHandle(m_condition);
CloseHandle(m_mutex);
}
void
Condition::lock()
{
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)GetCurrentThreadId() << ": Want to lock " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
WaitForSingleObject(m_mutex, INFINITE);
m_locked = true;
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)GetCurrentThreadId() << ": Locked " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
}
void
Condition::unlock()
{
if (!m_locked) {
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)GetCurrentThreadId() << ": Not locked " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
return;
}
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)GetCurrentThreadId() << ": Unlocking " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
m_locked = false;
ReleaseMutex(m_mutex);
}
void
Condition::wait(int us)
{
if (us == 0) {
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)GetCurrentThreadId() << ": Waiting on " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
SignalObjectAndWait(m_mutex, m_condition, INFINITE, FALSE);
WaitForSingleObject(m_mutex, INFINITE);
} else {
DWORD ms = us / 1000;
if (us > 0 && ms == 0) ms = 1;
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)GetCurrentThreadId() << ": Timed waiting on " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
SignalObjectAndWait(m_mutex, m_condition, ms, FALSE);
WaitForSingleObject(m_mutex, INFINITE);
}
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)GetCurrentThreadId() << ": Wait done on " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
m_locked = true;
}
void
Condition::signal()
{
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)GetCurrentThreadId() << ": Signalling " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
SetEvent(m_condition);
}
#else /* !_WIN32 */
#ifdef USE_PTHREADS
Thread::Thread() :
m_id(0),
m_extant(false)
{
#ifdef DEBUG_THREAD
cerr << "THREAD DEBUG: Created thread object " << this << endl;
#endif
}
Thread::~Thread()
{
#ifdef DEBUG_THREAD
cerr << "THREAD DEBUG: Destroying thread object " << this << ", id " << m_id << endl;
#endif
if (m_extant) {
pthread_join(m_id, 0);
}
#ifdef DEBUG_THREAD
cerr << "THREAD DEBUG: Destroyed thread object " << this << endl;
#endif
}
void
Thread::start()
{
if (pthread_create(&m_id, 0, staticRun, this)) {
cerr << "ERROR: thread creation failed" << endl;
exit(1);
} else {
#ifdef DEBUG_THREAD
cerr << "THREAD DEBUG: Created thread " << m_id << " for thread object " << this << endl;
#endif
m_extant = true;
}
}
void
Thread::wait()
{
if (m_extant) {
#ifdef DEBUG_THREAD
cerr << "THREAD DEBUG: Waiting on thread " << m_id << " for thread object " << this << endl;
#endif
pthread_join(m_id, 0);
#ifdef DEBUG_THREAD
cerr << "THREAD DEBUG: Waited on thread " << m_id << " for thread object " << this << endl;
#endif
m_extant = false;
}
}
Thread::Id
Thread::id()
{
return m_id;
}
bool
Thread::threadingAvailable()
{
return true;
}
void *
Thread::staticRun(void *arg)
{
Thread *thread = static_cast<Thread *>(arg);
#ifdef DEBUG_THREAD
cerr << "THREAD DEBUG: " << (void *)pthread_self() << ": Running thread " << thread->m_id << " for thread object " << thread << endl;
#endif
thread->run();
return 0;
}
Mutex::Mutex()
#ifndef NO_THREAD_CHECKS
:
m_lockedBy(0),
m_locked(false)
#endif
{
pthread_mutex_init(&m_mutex, 0);
#ifdef DEBUG_MUTEX
cerr << "MUTEX DEBUG: " << (void *)pthread_self() << ": Initialised mutex " << &m_mutex << endl;
#endif
}
Mutex::~Mutex()
{
#ifdef DEBUG_MUTEX
cerr << "MUTEX DEBUG: " << (void *)pthread_self() << ": Destroying mutex " << &m_mutex << endl;
#endif
pthread_mutex_destroy(&m_mutex);
}
void
Mutex::lock()
{
#ifndef NO_THREAD_CHECKS
pthread_t tid = pthread_self();
if (m_locked && m_lockedBy == tid) {
cerr << "ERROR: Deadlock on mutex " << &m_mutex << endl;
}
#endif
#ifdef DEBUG_MUTEX
cerr << "MUTEX DEBUG: " << (void *)tid << ": Want to lock mutex " << &m_mutex << endl;
#endif
pthread_mutex_lock(&m_mutex);
#ifndef NO_THREAD_CHECKS
m_lockedBy = tid;
m_locked = true;
#endif
#ifdef DEBUG_MUTEX
cerr << "MUTEX DEBUG: " << (void *)tid << ": Locked mutex " << &m_mutex << endl;
#endif
}
void
Mutex::unlock()
{
#ifndef NO_THREAD_CHECKS
pthread_t tid = pthread_self();
if (!m_locked) {
cerr << "ERROR: Mutex " << &m_mutex << " not locked in unlock" << endl;
return;
} else if (m_lockedBy != tid) {
cerr << "ERROR: Mutex " << &m_mutex << " not owned by unlocking thread" << endl;
return;
}
#endif
#ifdef DEBUG_MUTEX
cerr << "MUTEX DEBUG: " << (void *)tid << ": Unlocking mutex " << &m_mutex << endl;
#endif
#ifndef NO_THREAD_CHECKS
m_locked = false;
#endif
pthread_mutex_unlock(&m_mutex);
}
bool
Mutex::trylock()
{
#ifndef NO_THREAD_CHECKS
pthread_t tid = pthread_self();
#endif
if (pthread_mutex_trylock(&m_mutex)) {
#ifdef DEBUG_MUTEX
cerr << "MUTEX DEBUG: " << (void *)tid << ": Mutex " << &m_mutex << " unavailable" << endl;
#endif
return false;
} else {
#ifndef NO_THREAD_CHECKS
m_lockedBy = tid;
m_locked = true;
#endif
#ifdef DEBUG_MUTEX
cerr << "MUTEX DEBUG: " << (void *)tid << ": Locked mutex " << &m_mutex << " (from trylock)" << endl;
#endif
return true;
}
}
Condition::Condition(string name) :
m_locked(false)
#ifdef DEBUG_CONDITION
, m_name(name)
#endif
{
pthread_mutex_init(&m_mutex, 0);
pthread_cond_init(&m_condition, 0);
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)pthread_self() << ": Initialised condition " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
}
Condition::~Condition()
{
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)pthread_self() << ": Destroying condition " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
if (m_locked) pthread_mutex_unlock(&m_mutex);
pthread_cond_destroy(&m_condition);
pthread_mutex_destroy(&m_mutex);
}
void
Condition::lock()
{
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)pthread_self() << ": Want to lock " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
pthread_mutex_lock(&m_mutex);
m_locked = true;
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)pthread_self() << ": Locked " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
}
void
Condition::unlock()
{
if (!m_locked) {
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)pthread_self() << ": Not locked " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
return;
}
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)pthread_self() << ": Unlocking " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
m_locked = false;
pthread_mutex_unlock(&m_mutex);
}
void
Condition::wait(int us)
{
if (us == 0) {
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)pthread_self() << ": Waiting on " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
pthread_cond_wait(&m_condition, &m_mutex);
} else {
struct timeval now;
gettimeofday(&now, 0);
now.tv_usec += us;
while (now.tv_usec >= 1000000) {
now.tv_usec -= 1000000;
++now.tv_sec;
}
struct timespec timeout;
timeout.tv_sec = now.tv_sec;
timeout.tv_nsec = now.tv_usec * 1000;
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)pthread_self() << ": Timed waiting on " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
pthread_cond_timedwait(&m_condition, &m_mutex, &timeout);
}
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)pthread_self() << ": Wait done on " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
m_locked = true;
}
void
Condition::signal()
{
#ifdef DEBUG_CONDITION
cerr << "CONDITION DEBUG: " << (void *)pthread_self() << ": Signalling " << &m_condition << " \"" << m_name << "\"" << endl;
#endif
pthread_cond_signal(&m_condition);
}
#else /* !USE_PTHREADS */
Thread::Thread()
{
}
Thread::~Thread()
{
}
void
Thread::start()
{
abort();
}
void
Thread::wait()
{
abort();
}
Thread::Id
Thread::id()
{
abort();
}
bool
Thread::threadingAvailable()
{
return false;
}
Mutex::Mutex()
{
}
Mutex::~Mutex()
{
}
void
Mutex::lock()
{
abort();
}
void
Mutex::unlock()
{
abort();
}
bool
Mutex::trylock()
{
abort();
}
Condition::Condition(std::string)
{
}
Condition::~Condition()
{
}
void
Condition::lock()
{
abort();
}
void
Condition::wait(int us)
{
abort();
}
void
Condition::signal()
{
abort();
}
#endif /* !USE_PTHREADS */
#endif /* !_WIN32 */
MutexLocker::MutexLocker(Mutex *mutex) :
m_mutex(mutex)
{
if (m_mutex) {
m_mutex->lock();
}
}
MutexLocker::~MutexLocker()
{
if (m_mutex) {
m_mutex->unlock();
}
}
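
For reference, a small hypothetical sketch of the RAII pattern MutexLocker provides (the Counter class is invented for illustration):

#include "Thread.h"

class Counter
{
public:
    Counter() : m_count(0) { }

    void increment() {
        MutexLocker locker(&m_mutex);   // locks here...
        ++m_count;
    }                                   // ...unlocks when locker leaves scope

    int value() {
        MutexLocker locker(&m_mutex);
        return m_count;
    }

private:
    Mutex m_mutex;
    int m_count;
};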

thread/Thread.h

@@ -1,152 +0,0 @@
/* -*- c-basic-offset: 4 indent-tabs-mode: nil -*- vi:set ts=8 sts=4 sw=4: */
/*
QM DSP Library
Centre for Digital Music, Queen Mary, University of London.
This file copyright Chris Cannam, used with permission.
*/
#ifndef _THREAD_H_
#define _THREAD_H_
#ifdef _WIN32
#include <windows.h>
#else /* !_WIN32 */
#define USE_PTHREADS
#ifdef USE_PTHREADS
#include <pthread.h>
#else
#error Must have either _WIN32 or USE_PTHREADS defined
#endif /* USE_PTHREADS */
#endif /* !_WIN32 */
#include <string>
//#define DEBUG_THREAD 1
//#define DEBUG_MUTEX 1
//#define DEBUG_CONDITION 1
class Thread
{
public:
#ifdef _WIN32
typedef HANDLE Id;
#else
#ifdef USE_PTHREADS
typedef pthread_t Id;
#endif
#endif
Thread();
virtual ~Thread();
Id id();
void start();
void wait();
static bool threadingAvailable();
protected:
virtual void run() = 0;
private:
#ifdef _WIN32
HANDLE m_id;
bool m_extant;
static DWORD WINAPI staticRun(LPVOID lpParam);
#else
#ifdef USE_PTHREADS
pthread_t m_id;
bool m_extant;
static void *staticRun(void *);
#endif
#endif
};
class Mutex
{
public:
Mutex();
~Mutex();
void lock();
void unlock();
bool trylock();
private:
#ifdef _WIN32
HANDLE m_mutex;
#ifndef NO_THREAD_CHECKS
DWORD m_lockedBy;
#endif
#else
#ifdef USE_PTHREADS
pthread_mutex_t m_mutex;
#ifndef NO_THREAD_CHECKS
pthread_t m_lockedBy;
bool m_locked;
#endif
#endif
#endif
};
class MutexLocker
{
public:
MutexLocker(Mutex *);
~MutexLocker();
private:
Mutex *m_mutex;
};
class Condition
{
public:
Condition(std::string name);
~Condition();
// Condition bundles a pthread-style condition variable and mutex
// into one class.
// To wait on a condition, call lock(), test termination variables
// as appropriate, and then wait(). The condition will be
// unlocked for the duration of the wait() call, which will end
// when the condition is signalled. The condition will be locked
// again when wait() returns.
//
// To signal a condition, call signal(). If the waiting thread
// will be performing tests between its own lock() and wait(),
// then the signalling thread should also lock() before it signals
// (and then unlock afterwards). If the signalling thread always
// locks the mutex during signalling, then the waiting thread
// knows that signals will only happen during wait() and not be
// missed at other times.
void lock();
void unlock();
void wait(int us = 0);
void signal();
private:
#ifdef _WIN32
HANDLE m_mutex;
HANDLE m_condition;
bool m_locked;
#else
#ifdef USE_PTHREADS
pthread_mutex_t m_mutex;
pthread_cond_t m_condition;
bool m_locked;
#endif
#endif
#ifdef DEBUG_CONDITION
std::string m_name;
#endif
};
#endif
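
The Condition comment above spells out the lock/test/wait protocol; a minimal hypothetical pairing of a waiter and a signaller (the DataGate name and its flag are invented for illustration) could look like this:

#include "Thread.h"

class DataGate
{
public:
    DataGate() : m_ready("DataGate: data ready"), m_haveData(false) { }

    // Waiting side: lock, test, wait, re-test. wait() releases the
    // condition's mutex for its duration and re-acquires it on return.
    void waitForData() {
        m_ready.lock();
        while (!m_haveData) {
            m_ready.wait();
        }
        m_haveData = false;
        m_ready.unlock();
    }

    // Signalling side: lock around the state change so a waiter cannot
    // miss the signal between its own test and its wait().
    void announceData() {
        m_ready.lock();
        m_haveData = true;
        m_ready.signal();
        m_ready.unlock();
    }

private:
    Condition m_ready;
    bool m_haveData;
};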

wscript

@@ -49,7 +49,6 @@ def build(bld):
dsp/phasevocoder/PhaseVocoder.cpp
dsp/rateconversion/Decimator.cpp
dsp/rateconversion/DecimatorB.cpp
dsp/rateconversion/Resampler.cpp
dsp/rhythm/BeatSpectrum.cpp
dsp/segmentation/cluster_melt.c
dsp/segmentation/ClusterMeltSegmenter.cpp
@@ -73,7 +72,6 @@ def build(bld):
maths/KLDivergence.cpp
maths/MathUtilities.cpp
maths/pca/pca.c
thread/Thread.cpp
ext/kissfft/kiss_fft.c
ext/kissfft/tools/kiss_fftr.c
'''