Audio recording for podcasts on OSX

Re-coded this feature, which had been temporarily removed because the old
implementation relied on the obsolete QuickTime library.
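
At a high level, the replacement pipeline looks like this (a sketch for orientation, assembled from the diff below; not text from the commit):

    UBAudioQueueRecorder -- AudioQueue capture (LPCM, mono, 32-bit float, 44.1 kHz)
        -> newWaveBuffer(void* pBuffer, long pLength)       [Qt signal]
        -> UBQuickTimeFile::appendAudioBuffer()             [CMBlockBuffer -> CMSampleBuffer]
        -> AVAssetWriterInput (AAC, 128 kb/s) -> AVAssetWriter -> movie file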
Craig Watson 10 years ago
parent 8b6a9279d3
commit 8a9ae09933
Changed files:
  1. src/podcast/quicktime/UBAudioQueueRecorder.cpp — 41 changed lines
  2. src/podcast/quicktime/UBAudioQueueRecorder.h — 22 changed lines
  3. src/podcast/quicktime/UBQuickTimeFile.h — 39 changed lines
  4. src/podcast/quicktime/UBQuickTimeFile.mm — 172 changed lines

@@ -37,15 +37,22 @@ UBAudioQueueRecorder::UBAudioQueueRecorder(QObject* pParent)
     , mIsRecording(false)
     , mBufferLengthInMs(500)
 {
+    int sampleSize = sizeof(float); // TODO: check if this is system/microphone-dependent
+
     sAudioFormat.mSampleRate = 44100.0;
-    sAudioFormat.mFormatID = kAudioFormatMPEG4AAC;
-    sAudioFormat.mChannelsPerFrame = 2;
-    sAudioFormat.mBytesPerFrame = 0;
-    sAudioFormat.mBitsPerChannel = 0;
-    sAudioFormat.mBytesPerPacket = 0;
-    sAudioFormat.mFramesPerPacket = 0;
-    sAudioFormat.mFormatFlags = 0;
+    sAudioFormat.mFormatID = kAudioFormatLinearPCM;
+    sAudioFormat.mChannelsPerFrame = 1;
+    sAudioFormat.mBytesPerFrame = sampleSize;
+    sAudioFormat.mBitsPerChannel = 8 * sampleSize;
+    sAudioFormat.mBytesPerPacket = sampleSize;
+    sAudioFormat.mFramesPerPacket = 1;
+    sAudioFormat.mFormatFlags = kAudioFormatFlagIsFloat |
+                                kAudioFormatFlagsNativeEndian |
+                                kAudioFormatFlagIsPacked;
 }
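
For reference, a standalone sketch of the stream format the recorder now requests (mono 32-bit float LPCM; the main() harness and printout are illustrative, not part of the commit):

    #include <CoreAudio/CoreAudioTypes.h>
    #include <cstdio>

    int main()
    {
        AudioStreamBasicDescription fmt = {};
        int sampleSize = sizeof(float);              // 4 bytes per sample

        fmt.mSampleRate       = 44100.0;
        fmt.mFormatID         = kAudioFormatLinearPCM;
        fmt.mChannelsPerFrame = 1;                   // mono
        fmt.mBytesPerFrame    = sampleSize;
        fmt.mBitsPerChannel   = 8 * sampleSize;      // 32
        fmt.mBytesPerPacket   = sampleSize;          // 1 frame per packet (LPCM)
        fmt.mFramesPerPacket  = 1;
        fmt.mFormatFlags      = kAudioFormatFlagIsFloat
                              | kAudioFormatFlagsNativeEndian
                              | kAudioFormatFlagIsPacked;

        // 44100 frames/s * 4 bytes/frame = 176400 bytes per second of audio
        printf("bytes/sec: %.0f\n", fmt.mSampleRate * fmt.mBytesPerFrame);
        return 0;
    }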
@@ -250,7 +257,7 @@ bool UBAudioQueueRecorder::init(const QString& waveInDeviceName)
     int nbBuffers = 6;
     mSampleBufferSize = sAudioFormat.mSampleRate * sAudioFormat.mChannelsPerFrame
-                        * 2 * mBufferLengthInMs / 1000; // 44.1 kHz * stereo * 16 bit * buffer length
+                        * sAudioFormat.mBytesPerFrame * mBufferLengthInMs / 1000; // 44.1 kHz * mono * 32-bit float * buffer length

     for (int i = 0; i < nbBuffers; i++)
     {
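
With this format, one 500 ms buffer works out to 44100 frames/s × 1 channel × 4 bytes × 0.5 s = 88200 bytes. The hunk doesn't show the queue being created; a minimal sketch (assuming <AudioToolbox/AudioToolbox.h> and the member names visible in this diff; the error handling is illustrative) of how an input queue is typically set up around these values:

    AudioQueueRef queue = NULL;
    OSStatus err = AudioQueueNewInput(&sAudioFormat,
                                      &UBAudioQueueRecorder::audioQueueInputCallback,
                                      this,          // inUserData, recovered in the callback
                                      NULL, NULL, 0, // default run loop, no flags
                                      &queue);

    // Allocate and enqueue the capture buffers; the queue cycles through them,
    // invoking the callback each time one fills up.
    for (int i = 0; err == noErr && i < nbBuffers; i++)
    {
        AudioQueueBufferRef buffer;
        err = AudioQueueAllocateBuffer(queue, mSampleBufferSize, &buffer);
        if (err == noErr)
            err = AudioQueueEnqueueBuffer(queue, buffer, 0, NULL);
    }

    if (err == noErr)
        err = AudioQueueStart(queue, NULL); // NULL = start as soon as possible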
@@ -333,9 +340,12 @@ bool UBAudioQueueRecorder::close()
 }

-void UBAudioQueueRecorder::audioQueueInputCallback (void *inUserData, AudioQueueRef inAQ,
-        AudioQueueBufferRef inBuffer, const AudioTimeStamp *inStartTime,
-        UInt32 inNumberPacketDescriptions, const AudioStreamPacketDescription *inPacketDescs)
+void UBAudioQueueRecorder::audioQueueInputCallback (void *inUserData,
+                                                    AudioQueueRef inAQ,
+                                                    AudioQueueBufferRef inBuffer,
+                                                    const AudioTimeStamp *inStartTime,
+                                                    UInt32 inNumberPacketDescriptions,
+                                                    const AudioStreamPacketDescription *inPacketDescs)
 {
     Q_UNUSED(inAQ);
     Q_UNUSED(inStartTime)
@@ -356,12 +366,11 @@ void UBAudioQueueRecorder::audioQueueInputCallback (void *inUserData, AudioQueue
 void UBAudioQueueRecorder::emitNewWaveBuffer(AudioQueueBufferRef pBuffer,
-                                             int inNumberPacketDescriptions, const AudioStreamPacketDescription *inPacketDescs)
+                                             int inNumberPacketDescriptions,
+                                             const AudioStreamPacketDescription *inPacketDescs)
 {
-    AudioStreamPacketDescription* tmpPackages = (AudioStreamPacketDescription*)malloc(inNumberPacketDescriptions * sizeof(AudioStreamPacketDescription));
-    memcpy(tmpPackages, inPacketDescs, inNumberPacketDescriptions * sizeof(AudioStreamPacketDescription));
-    emit newWaveBuffer(pBuffer->mAudioData, pBuffer->mAudioDataByteSize, inNumberPacketDescriptions, tmpPackages);
+    emit newWaveBuffer(pBuffer->mAudioData, pBuffer->mAudioDataByteSize);

     qreal level = 0;
     UInt32 size;

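The packet descriptions could be dropped from the signal because LPCM is a constant-bitrate format: every packet is exactly one frame of known size, so the AudioStreamBasicDescription alone describes the stream (which is also why appendAudioBuffer below passes NULL packet descriptions). A sketch of the invariant, with byteCount standing in for a buffer's mAudioDataByteSize:

    // For the LPCM format above, mFramesPerPacket == 1 and
    // mBytesPerPacket == mBytesPerFrame, so packet boundaries are implicit:
    long packetCount = byteCount / sAudioFormat.mBytesPerPacket; // e.g. 88200 / 4 == 22050
    // A variable-bitrate format such as AAC would instead need explicit
    // AudioStreamPacketDescription entries, one per packet.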
@@ -57,23 +57,29 @@ class UBAudioQueueRecorder : public QObject
             return mLastErrorMessage;
         }

-        static AudioStreamBasicDescription audioFormat()
+        AudioStreamBasicDescription * audioFormat()
         {
-            return sAudioFormat;
+            return &sAudioFormat;
         }

     signals:
-        void newWaveBuffer(void* pBuffer, long pLength, int inNumberPacketDescriptions, const AudioStreamPacketDescription *inPacketDescs);
+        void newWaveBuffer(void* pBuffer,
+                           long pLength);

         void audioLevelChanged(quint8 level);

     private:
-        static void audioQueueInputCallback (void *inUserData, AudioQueueRef inAQ,
-                AudioQueueBufferRef inBuffer, const AudioTimeStamp *inStartTime,
-                UInt32 inNumberPacketDescriptions, const AudioStreamPacketDescription *inPacketDescs);
-        void emitNewWaveBuffer(AudioQueueBufferRef pBuffer, int inNumberPacketDescriptions, const AudioStreamPacketDescription *inPacketDescs);
+        static void audioQueueInputCallback (void *inUserData,
+                                             AudioQueueRef inAQ,
+                                             AudioQueueBufferRef inBuffer,
+                                             const AudioTimeStamp *inStartTime,
+                                             UInt32 inNumberPacketDescriptions,
+                                             const AudioStreamPacketDescription *inPacketDescs);
+        void emitNewWaveBuffer(AudioQueueBufferRef pBuffer,
+                               int inNumberPacketDescriptions,
+                               const AudioStreamPacketDescription *inPacketDescs);

         void emitAudioLevelChanged(quint8 level);

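A hypothetical consumer of this interface (UBQuickTimeFile plays this role below; the lambda bodies and the empty device name are illustrative assumptions, not code from the commit):

    UBAudioQueueRecorder* recorder = new UBAudioQueueRecorder();

    QObject::connect(recorder, &UBAudioQueueRecorder::newWaveBuffer,
                     [](void* pBuffer, long pLength) {
                         // hand the raw LPCM bytes to a writer/encoder
                     });
    QObject::connect(recorder, &UBAudioQueueRecorder::audioLevelChanged,
                     [](quint8 level) {
                         // drive a VU meter
                     });

    if (!recorder->init(QString())) // assumption: empty name selects the default input device
        qWarning() << recorder->lastErrorMessage();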
@@ -31,6 +31,7 @@
 #include <QtCore>
 #include <CoreVideo/CoreVideo.h>
 #include <CoreMedia/CoreMedia.h>
+#include "UBAudioQueueRecorder.h"
@@ -92,40 +93,50 @@ class UBQuickTimeFile : public QThread
     protected:
         void run();

+    private slots:
+        void appendAudioBuffer(void* pBuffer, long pLength);
+
     private:
         bool beginSession();
-        void appendVideoFrame(CVPixelBufferRef pixelBuffer, long msTimeStamp);
         void setLastErrorMessage(const QString& error);
-        bool flushPendingFrames();
-        volatile CVPixelBufferPoolRef mCVPixelBufferPool;
+
+        void appendVideoFrame(CVPixelBufferRef pixelBuffer, long msTimeStamp);
+        bool flushPendingFrames();

-        volatile bool mShouldStopCompression;
-        volatile bool mCompressionSessionRunning;
-        volatile int mPendingFrames;
-        QString mSpatialQuality;
         int mFramesPerSecond;
         QSize mFrameSize;
         QString mVideoFileName;
-        long mTimeScale;
-        AssetWriterPTR mVideoWriter;
-        QString mLastErrorMessage;
-        AssetWriterInputPTR mVideoWriterInput;
-        AssetWriterInputAdaptorPTR mAdaptor;
+        bool mRecordAudio;
+
+        QString mSpatialQuality;
+        AssetWriterInputPTR mAudioWriterInput;
+
+        volatile bool mShouldStopCompression;
+        volatile bool mCompressionSessionRunning;
+
+        QPointer<UBAudioQueueRecorder> mWaveRecorder;
+        CFAbsoluteTime mStartTime;
+        CMAudioFormatDescriptionRef mAudioFormatDescription;
+
+        long mTimeScale;
+        QString mLastErrorMessage;
+        QString mAudioRecordingDeviceName;
+
+        volatile int mPendingFrames;
+
+        AssetWriterPTR mVideoWriter;
+        AssetWriterInputPTR mVideoWriterInput;
+        AssetWriterInputAdaptorPTR mAdaptor;
 };

 #endif /* UBQUICKTIMEFILE_H_ */

@@ -47,21 +47,20 @@ UBQuickTimeFile::UBQuickTimeFile(QObject * pParent)
     , mVideoWriter(0)
     , mVideoWriterInput(0)
     , mAdaptor(0)
-    , mCVPixelBufferPool(0)
     , mFramesPerSecond(-1)
-    , mTimeScale(100)
+    , mTimeScale(1000)
+    , mRecordAudio(true)
     , mShouldStopCompression(false)
     , mCompressionSessionRunning(false)
     , mPendingFrames(0)
 {
     // NOOP
 }

 UBQuickTimeFile::~UBQuickTimeFile()
 {
-    // NOOP
+    // destruction of mWaveRecorder is handled by endSession()
 }

 bool UBQuickTimeFile::init(const QString& pVideoFileName, const QString& pProfileData, int pFramesPerSecond
@@ -126,7 +125,11 @@ void UBQuickTimeFile::run()
 }

 /**
- * \brief Initialize the AVAssetWriter, which handles writing the media to file
+ * \brief Begin the recording session; initialize the audio/video writer
+ * \return true if the session was initialized successfully
+ *
+ * This function initializes the AVAssetWriter and associated video and audio inputs.
+ * Video is encoded as H264; audio is encoded as AAC.
  */
 bool UBQuickTimeFile::beginSession()
 {
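
Before the body (spread over the hunks that follow), the bare shape of an AVAssetWriter session that beginSession() builds out, as a sketch: url and videoSettings are placeholders, and the commit's manual-retain style is omitted here.

    NSError *outError = nil;
    AVAssetWriter *writer = [AVAssetWriter assetWriterWithURL:url
                                                     fileType:AVFileTypeQuickTimeMovie
                                                        error:&outError];

    AVAssetWriterInput *input = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo
                                                                   outputSettings:videoSettings];
    if ([writer canAddInput:input])
        [writer addInput:input];

    [writer startWriting];
    [writer startSessionAtSourceTime:CMTimeMake(0, 1000)]; // timescale 1000 = millisecond precision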
@@ -141,12 +144,17 @@ bool UBQuickTimeFile::beginSession()
     // Create and check the assetWriter
     mVideoWriter = [[AVAssetWriter assetWriterWithURL:outputUrl
-                    fileType:AVFileTypeQuickTimeMovie
-                    error:&outError] retain];
+                                             fileType:AVFileTypeQuickTimeMovie
+                                                error:&outError] retain];
     NSCParameterAssert(mVideoWriter);

+    mVideoWriter.movieTimeScale = mTimeScale;
+
+    // Video
+    //
     int frameWidth = mFrameSize.width();
     int frameHeight = mFrameSize.height();
@@ -175,18 +183,72 @@ bool UBQuickTimeFile::beginSession()
                  assetWriterInputPixelBufferAdaptorWithAssetWriterInput:mVideoWriterInput
                  sourcePixelBufferAttributes:pixelBufSettings] retain];

-    // Add the input(s) to the assetWriter
     NSCParameterAssert([mVideoWriter canAddInput:mVideoWriterInput]);
     [mVideoWriter addInput:mVideoWriterInput];

-    // begin the writing session
+    // Audio
+    //
+    if(mRecordAudio) {
+        mWaveRecorder = new UBAudioQueueRecorder();
+
+        // Get the audio format description from mWaveRecorder
+        CMAudioFormatDescriptionCreate(kCFAllocatorDefault,
+                                       mWaveRecorder->audioFormat(),
+                                       0, NULL, 0, NULL, NULL,
+                                       &mAudioFormatDescription);
+
+        if(mWaveRecorder->init(mAudioRecordingDeviceName)) {
+            connect(mWaveRecorder, &UBAudioQueueRecorder::newWaveBuffer,
+                    this, &UBQuickTimeFile::appendAudioBuffer);
+
+            connect(mWaveRecorder, SIGNAL(audioLevelChanged(quint8)),
+                    this, SIGNAL(audioLevelChanged(quint8)));
+        }
+        else {
+            setLastErrorMessage(mWaveRecorder->lastErrorMessage());
+            mWaveRecorder->deleteLater();
+            mRecordAudio = false;
+        }
+
+        // Audio is mono, and compressed to AAC at 128kbps
+        AudioChannelLayout audioChannelLayout = {
+            .mChannelLayoutTag = kAudioChannelLayoutTag_Mono,
+            .mChannelBitmap = 0,
+            .mNumberChannelDescriptions = 0
+        };
+
+        NSData *channelLayoutAsData = [NSData dataWithBytes:&audioChannelLayout
+                                                     length:offsetof(AudioChannelLayout, mChannelDescriptions)];
+
+        NSDictionary * compressionAudioSettings = @{
+            AVFormatIDKey         : [NSNumber numberWithUnsignedInt:kAudioFormatMPEG4AAC],
+            AVEncoderBitRateKey   : [NSNumber numberWithInteger:128000],
+            AVSampleRateKey       : [NSNumber numberWithInteger:44100],
+            AVChannelLayoutKey    : channelLayoutAsData,
+            AVNumberOfChannelsKey : [NSNumber numberWithUnsignedInteger:1]
+        };
+
+        mAudioWriterInput = [[AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeAudio
+                                                                outputSettings:compressionAudioSettings] retain];
+
+        NSCParameterAssert([mVideoWriter canAddInput:mAudioWriterInput]);
+        [mVideoWriter addInput:mAudioWriterInput];
+
+        qDebug() << "audio writer input created and added";
+    }
+
+    // Begin the writing session
     bool canStartWriting = [mVideoWriter startWriting];
     [mVideoWriter startSessionAtSourceTime:CMTimeMake(0, mTimeScale)];

-    // return true if everything was created and started successfully
+    mStartTime = CFAbsoluteTimeGetCurrent(); // used for audio timestamp calculation
+
     return (mVideoWriter != nil) && (mVideoWriterInput != nil) && canStartWriting;
 }
@@ -201,10 +263,18 @@ void UBQuickTimeFile::endSession()
     [mAdaptor release];
     [mVideoWriterInput release];
     [mVideoWriter release];
+    [mAudioWriterInput release];

     mAdaptor = nil;
     mVideoWriterInput = nil;
     mVideoWriter = nil;
+    mAudioWriterInput = nil;
+
+    if (mWaveRecorder) {
+        mWaveRecorder->close();
+        mWaveRecorder->deleteLater();
+    }
 }

 /**
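
The hunk shows only the release half of endSession(); the usual complete teardown order for an AVAssetWriter (sketched here as an assumption, since the surrounding context lines are not part of the hunk) is to finish the inputs and the writer first, then release:

    [mVideoWriterInput markAsFinished];
    [mAudioWriterInput markAsFinished];
    [mVideoWriter finishWriting]; // synchronous; newer SDKs prefer
                                  // finishWritingWithCompletionHandler:
    // ... then the release / nil / close() sequence shown above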
@@ -235,11 +305,11 @@ CVPixelBufferRef UBQuickTimeFile::newPixelBuffer()
 /**
  * \brief Add a frame to the pixel buffer adaptor
  * \param pixelBuffer The CVPixelBufferRef (video frame) to add to the movie
+ * \param msTimeStamp Timestamp, in milliseconds, of the frame
  */
 void UBQuickTimeFile::appendVideoFrame(CVPixelBufferRef pixelBuffer, long msTimeStamp)
 {
-    //qDebug() << "adding video frame at time: " << msTimeStamp;
     CMTime t = CMTimeMake((msTimeStamp * mTimeScale / 1000.0), mTimeScale);

     bool added = [mAdaptor appendPixelBuffer: pixelBuffer
@@ -252,11 +322,79 @@ void UBQuickTimeFile::appendVideoFrame(CVPixelBufferRef pixelBuffer, long msTime
     CVPixelBufferRelease(pixelBuffer);
 }

+/**
+ * \brief Append an AudioQueue Buffer to the audio AVAssetWriterInput
+ * \param pBuffer The AudioQueueBufferRef to add. Must be uncompressed (LPCM).
+ * \param pLength The length of the buffer, in bytes
+ *
+ * This function serves as an interface between the low-level audio stream
+ * (implemented in the UBAudioQueueRecorder class) and the recording, handled
+ * by the AVAssetWriterInput instance mAudioWriterInput.
+ */
+void UBQuickTimeFile::appendAudioBuffer(void* pBuffer,
+                                        long pLength)
+{
+    if(!mRecordAudio)
+        return;
+
+    // CMSampleBuffers require a CMBlockBuffer to hold the media data; we
+    // create a blockBuffer here from the AudioQueueBuffer's data.
+    CMBlockBufferRef blockBuffer;
+    CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,
+                                       pBuffer,
+                                       pLength,
+                                       kCFAllocatorNull,
+                                       NULL,
+                                       0,
+                                       pLength,
+                                       kCMBlockBufferAssureMemoryNowFlag,
+                                       &blockBuffer);
+
+    // Timestamp of current sample
+    CFAbsoluteTime currentTime = CFAbsoluteTimeGetCurrent();
+    CFTimeInterval elapsedTime = currentTime - mStartTime;
+    CMTime timeStamp = CMTimeMake(elapsedTime * mTimeScale, mTimeScale);
+
+    // Number of samples in the buffer
+    long nSamples = pLength / mWaveRecorder->audioFormat()->mBytesPerFrame;
+
+    CMSampleBufferRef sampleBuffer;
+    CMAudioSampleBufferCreateWithPacketDescriptions(kCFAllocatorDefault,
+                                                    blockBuffer,
+                                                    true,
+                                                    NULL,
+                                                    NULL,
+                                                    mAudioFormatDescription,
+                                                    nSamples,
+                                                    timeStamp,
+                                                    NULL,
+                                                    &sampleBuffer);
+
+    // Add the audio sample to the asset writer input
+    if ([mAudioWriterInput isReadyForMoreMediaData]) {
+        if(![mAudioWriterInput appendSampleBuffer:sampleBuffer])
+            setLastErrorMessage(QString("Failed to append sample buffer to audio input"));
+    }
+    else
+        setLastErrorMessage(QString("Audio Writer not ready; sample dropped"));
+
+    CFRelease(sampleBuffer);
+    CFRelease(blockBuffer);
+
+    // The audioQueueBuffers are all freed when UBAudioQueueRecorder::close() is called
+}
+
 /**
  * \brief Print an error message to the terminal, and store it
  */
 void UBQuickTimeFile::setLastErrorMessage(const QString& error)
 {
     mLastErrorMessage = error;
     qWarning() << "UBQuickTimeFile error" << error;
 }
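
A worked pass through the arithmetic above, for one 500 ms buffer of the mono float format (the numbers follow from the recorder's configuration earlier in this commit):

    // pLength  = 88200 bytes              (0.5 s * 44100 frames/s * 4 bytes/frame)
    // nSamples = 88200 / 4 = 22050        (frames in the buffer)
    // elapsed  = e.g. 1.25 s, mTimeScale = 1000
    //          => timeStamp = CMTimeMake(1250, 1000), i.e. 1.25 s at millisecond precision
    CMTime timeStamp = CMTimeMake(elapsedTime * mTimeScale, mTimeScale);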
