Podcasts on Linux: added audio support

Craig Watson 9 years ago
parent a2fb735bbc
commit 11c207d7ee
  1. src/podcast/UBPodcastController.cpp (3 changed lines)
  2. src/podcast/ffmpeg/UBFFmpegVideoEncoder.cpp (338 changed lines)
  3. src/podcast/ffmpeg/UBFFmpegVideoEncoder.h (48 changed lines)
  4. src/podcast/ffmpeg/UBMicrophoneInput.cpp (211 changed lines)
  5. src/podcast/ffmpeg/UBMicrophoneInput.h (57 changed lines)
  6. src/podcast/podcast.pri (6 changed lines)

src/podcast/UBPodcastController.cpp
@@ -66,6 +66,7 @@
#include "quicktime/UBAudioQueueRecorder.h"
#elif defined(Q_OS_LINUX)
#include "ffmpeg/UBFFmpegVideoEncoder.h"
#include "ffmpeg/UBMicrophoneInput.h"
#endif
#include "core/memcheck.h"
@@ -808,6 +809,8 @@ QStringList UBPodcastController::audioRecordingDevices()
devices = UBWaveRecorder::waveInDevices();
#elif defined(Q_OS_OSX)
devices = UBAudioQueueRecorder::waveInDevices();
#elif defined(Q_OS_LINUX)
devices = UBMicrophoneInput::availableDevicesNames();
#endif
return devices;

src/podcast/ffmpeg/UBFFmpegVideoEncoder.cpp
@@ -20,18 +20,32 @@ QString avErrorToQString(int errnum)
*
* This class provides an interface between the screencast controller and the ffmpeg
* back-end. It initializes the audio and video encoders and frees them when done;
* worker threads handle the actual encoding of frames.
* a worker thread handles the actual encoding and writing of frames.
*
*/
UBFFmpegVideoEncoder::UBFFmpegVideoEncoder(QObject* parent)
: UBAbstractVideoEncoder(parent)
, mOutputFormatContext(NULL)
, mSwsContext(NULL)
, mFile(NULL)
, mShouldRecordAudio(true)
, mAudioInput(NULL)
, mSwrContext(NULL)
, mAudioOutBuffer(NULL)
, mAudioSampleRate(44100)
, mAudioFrameCount(0)
{
if (mShouldRecordAudio) {
mAudioInput = new UBMicrophoneInput();
mTimebase = 100 * framesPerSecond();
qDebug() << "timebase: " << mTimebase;
connect(mAudioInput, SIGNAL(audioLevelChanged(quint8)),
this, SIGNAL(audioLevelChanged(quint8)));
connect(mAudioInput, SIGNAL(dataAvailable(QByteArray)),
this, SLOT(onAudioAvailable(QByteArray)));
}
mVideoTimebase = 100 * framesPerSecond();
qDebug() << "timebase: " << mVideoTimebase;
mVideoEncoderThread = new QThread;
mVideoWorker = new UBFFmpegVideoEncoderWorker(this);
@@ -58,6 +72,8 @@ UBFFmpegVideoEncoder::~UBFFmpegVideoEncoder()
if (mVideoEncoderThread)
delete mVideoEncoderThread;
if (mAudioInput)
delete mAudioInput;
}
void UBFFmpegVideoEncoder::setLastErrorMessage(const QString& pMessage)
@@ -66,12 +82,16 @@ void UBFFmpegVideoEncoder::setLastErrorMessage(const QString& pMessage)
mLastErrorMessage = pMessage;
}
bool UBFFmpegVideoEncoder::start()
{
bool initialized = init();
if (initialized)
if (initialized) {
mVideoEncoderThread->start();
if (mShouldRecordAudio)
mAudioInput->start();
}
return initialized;
}
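For orientation, here is a minimal caller-side sketch of the encoder lifecycle on Linux (hypothetical, not part of this commit; the setter and newPixmap signatures are assumed from UBAbstractVideoEncoder):

    #include "podcast/ffmpeg/UBFFmpegVideoEncoder.h"

    // Hypothetical driver -- not part of this commit.
    void recordScreencast(const QImage& screenImage, long elapsedMs, QObject* parent)
    {
        UBFFmpegVideoEncoder* encoder = new UBFFmpegVideoEncoder(parent);
        encoder->setRecordAudio(true);  // declared in the header further down
        if (encoder->start()) {         // init() + worker thread + mic capture
            // one call per captured screenshot, timestamp in ms since start
            encoder->newPixmap(screenImage, elapsedMs);
            encoder->stop();            // stops the worker and the audio input
        }
    }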
@@ -82,12 +102,14 @@ bool UBFFmpegVideoEncoder::stop()
mVideoWorker->stopEncoding();
if (mShouldRecordAudio)
mAudioInput->stop();
return true;
}
bool UBFFmpegVideoEncoder::init()
{
// Initialize ffmpeg lib
av_register_all();
avcodec_register_all();
@@ -96,7 +118,6 @@ bool UBFFmpegVideoEncoder::init()
// Output format and context
// --------------------------------------
if (avformat_alloc_output_context2(&mOutputFormatContext, NULL,
"mp4", NULL) < 0)
{
@@ -109,6 +130,7 @@
// Video codec and context
// -------------------------------------
mVideoStream = avformat_new_stream(mOutputFormatContext, 0);
AVCodec * videoCodec = avcodec_find_encoder(mOutputFormatContext->oformat->video_codec);
if (!videoCodec) {
@@ -116,16 +138,12 @@
return false;
}
mVideoStream = avformat_new_stream(mOutputFormatContext, 0);
mVideoStream->time_base = {1, mTimebase};
avcodec_get_context_defaults3(mVideoStream->codec, videoCodec);
AVCodecContext* c = avcodec_alloc_context3(videoCodec);
c->bit_rate = videoBitsPerSecond();
c->width = videoSize().width();
c->height = videoSize().height();
c->time_base = {1, mTimebase};
c->time_base = {1, mVideoTimebase};
c->gop_size = 10;
c->max_b_frames = 0;
c->pix_fmt = AV_PIX_FMT_YUV420P;
@@ -161,10 +179,77 @@ bool UBFFmpegVideoEncoder::init()
// Audio codec and context
// -------------------------------------
/*
if (mShouldRecordAudio) {
// Microphone input
if (!mAudioInput->init()) {
setLastErrorMessage("Couldn't initialize audio input");
return false;
}
int inChannelCount = mAudioInput->channelCount();
int inSampleRate = mAudioInput->sampleRate();
int inSampleSize = mAudioInput->sampleSize();
qDebug() << "inChannelCount = " << inChannelCount;
qDebug() << "inSampleRate = " << inSampleRate;
qDebug() << "inSampleSize = " << inSampleSize;
// Codec
AVCodec * audioCodec = avcodec_find_encoder(mOutputFormatContext->oformat->audio_codec);
if (!audioCodec) {
setLastErrorMessage("Audio codec not found");
return false;
}
mAudioStream = avformat_new_stream(mOutputFormatContext, audioCodec);
*/
mAudioStream->id = mOutputFormatContext->nb_streams-1;
c = mAudioStream->codec;
c->bit_rate = 96000;
c->sample_fmt = audioCodec->sample_fmts[0]; // FLTP by default for AAC
c->sample_rate = mAudioSampleRate;
c->channels = 2;
c->channel_layout = av_get_default_channel_layout(c->channels);
c->profile = FF_PROFILE_AAC_MAIN;
c->time_base = {1, mAudioSampleRate};
if (mOutputFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
ret = avcodec_open2(c, audioCodec, NULL);
if (ret < 0) {
setLastErrorMessage(QString("Couldn't open audio codec: ") + avErrorToQString(ret));
return false;
}
// Resampling / format converting context
mSwrContext = swr_alloc();
if (!mSwrContext) {
setLastErrorMessage("Could not allocate resampler context");
return false;
}
av_opt_set_int(mSwrContext, "in_channel_count", inChannelCount, 0);
av_opt_set_int(mSwrContext, "in_sample_rate", inSampleRate, 0);
av_opt_set_sample_fmt(mSwrContext, "in_sample_fmt", (AVSampleFormat)mAudioInput->sampleFormat(), 0);
av_opt_set_int(mSwrContext, "out_channel_count", c->channels, 0);
av_opt_set_int(mSwrContext, "out_sample_rate", c->sample_rate, 0);
av_opt_set_sample_fmt(mSwrContext, "out_sample_fmt", c->sample_fmt, 0);
ret = swr_init(mSwrContext);
if (ret < 0) {
setLastErrorMessage(QString("Couldn't initialize the resampling context: ") + avErrorToQString(ret));
return false;
}
// Buffer for resampled/converted audio
mAudioOutBuffer = av_audio_fifo_alloc(c->sample_fmt, c->channels, c->frame_size);
}
// Open the output file
@@ -185,8 +270,14 @@ bool UBFFmpegVideoEncoder::init()
return true;
}
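As an aside, the field-by-field av_opt_set_* configuration above can also be written with FFmpeg's single-call helper swr_alloc_set_opts(); a sketch using the same variables init() already defines:

    // Equivalent resampler setup (sketch); parameters mirror the
    // av_opt_set_* calls in init() above.
    mSwrContext = swr_alloc_set_opts(NULL,
            av_get_default_channel_layout(c->channels),     // out layout
            c->sample_fmt, c->sample_rate,                  // out fmt / rate
            av_get_default_channel_layout(inChannelCount),  // in layout
            (AVSampleFormat)mAudioInput->sampleFormat(),    // in fmt
            inSampleRate,                                   // in rate
            0, NULL);                                       // logging
    if (!mSwrContext || swr_init(mSwrContext) < 0)
        setLastErrorMessage("Couldn't initialize the resampling context");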
/**
* This function should be called every time a new "screenshot" is ready.
* The image is converted to the right format and sent to the encoder.
*/
void UBFFmpegVideoEncoder::newPixmap(const QImage &pImage, long timestamp)
{
// really necessary?
static bool isFirstFrame = true;
if (isFirstFrame) {
timestamp = 0;
@@ -201,16 +292,16 @@ void UBFFmpegVideoEncoder::newPixmap(const QImage &pImage, long timestamp)
else {
// First send any queued frames, then the latest one
while (!mPendingFrames.isEmpty()) {
AVFrame* avFrame = convertFrame(mPendingFrames.dequeue());
AVFrame* avFrame = convertImageFrame(mPendingFrames.dequeue());
if (avFrame)
mVideoWorker->queueFrame(avFrame);
}
// note: if converting the frame turns out to be too slow to do here, it
// can always be done from the worker thread (in thta case,
// can always be done from the worker thread (in that case,
// the worker's queue would contain ImageFrames rather than AVFrames)
AVFrame* avFrame = convertFrame({pImage, timestamp});
AVFrame* avFrame = convertImageFrame({pImage, timestamp});
if (avFrame)
mVideoWorker->queueFrame(avFrame);
@@ -219,17 +310,18 @@ void UBFFmpegVideoEncoder::newPixmap(const QImage &pImage, long timestamp)
}
}
/** Convert a frame consisting of a QImage and timestamp to an AVFrame
/**
* Convert a frame consisting of a QImage and timestamp to an AVFrame
* with the right pixel format and PTS
*/
AVFrame* UBFFmpegVideoEncoder::convertFrame(ImageFrame frame)
AVFrame* UBFFmpegVideoEncoder::convertImageFrame(ImageFrame frame)
{
AVFrame* avFrame = av_frame_alloc();
avFrame->format = mVideoStream->codec->pix_fmt;
avFrame->width = mVideoStream->codec->width;
avFrame->height = mVideoStream->codec->height;
avFrame->pts = mTimebase * frame.timestamp / 1000;
avFrame->pts = mVideoTimebase * frame.timestamp / 1000;
const uchar * rgbImage = frame.image.bits();
@@ -254,6 +346,93 @@ AVFrame* UBFFmpegVideoEncoder::convertFrame(ImageFrame frame)
return avFrame;
}
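The hunk above elides the body of the conversion; for context, its core presumably looks like the following sketch (assuming the QImage arrives in a packed 4-bytes-per-pixel RGB layout, which is what mSwsContext would be configured for):

    // Sketch of the elided conversion step (packed RGB input assumed).
    av_frame_get_buffer(avFrame, 0);           // allocate the YUV420P planes

    const uchar* rgbImage = frame.image.bits();
    const int rgbStride[1] = { 4 * frame.image.width() };

    // One packed source plane in, three planar YUV buffers out
    sws_scale(mSwsContext,
              &rgbImage, rgbStride, 0, frame.image.height(),
              avFrame->data, avFrame->linesize);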
void UBFFmpegVideoEncoder::onAudioAvailable(QByteArray data)
{
if (!data.isEmpty())
processAudio(data);
}
/**
* Resample and convert audio to match the encoder's settings and queue the
* output. If enough output data is available, it is packaged into AVFrames and
* sent to the encoder thread.
*/
void UBFFmpegVideoEncoder::processAudio(QByteArray &data)
{
int ret;
AVCodecContext* codecContext = mAudioStream->codec;
const char * inSamples = data.constData();
// The number of samples (per channel) in the input
int inSamplesCount = data.size() / ((mAudioInput->sampleSize() / 8) * mAudioInput->channelCount());
// The number of samples we will get after conversion
int outSamplesCount = swr_get_out_samples(mSwrContext, inSamplesCount);
// Allocate output samples
uint8_t ** outSamples = NULL;
int outSamplesLineSize;
ret = av_samples_alloc_array_and_samples(&outSamples, &outSamplesLineSize,
codecContext->channels, outSamplesCount,
codecContext->sample_fmt, 0);
if (ret < 0) {
qDebug() << "Could not allocate audio samples" << avErrorToQString(ret);
return;
}
// Convert to destination format
ret = swr_convert(mSwrContext,
outSamples, outSamplesCount,
(const uint8_t **)&inSamples, inSamplesCount);
if (ret < 0) {
qDebug() << "Error converting audio samples: " << avErrorToQString(ret);
return;
}
// Append the converted samples to the out buffer.
ret = av_audio_fifo_write(mAudioOutBuffer, (void**)outSamples, outSamplesCount);
if (ret < 0) {
qDebug() << "Could not write to FIFO queue: " << avErrorToQString(ret);
return;
}
// Keep the data queued until next call if the encoder thread isn't running
if (!mVideoWorker->isRunning())
return;
bool framesAdded = false;
while (av_audio_fifo_size(mAudioOutBuffer) > codecContext->frame_size) {
AVFrame * avFrame = av_frame_alloc();
avFrame->nb_samples = codecContext->frame_size;
avFrame->channel_layout = codecContext->channel_layout;
avFrame->format = codecContext->sample_fmt;
avFrame->sample_rate = codecContext->sample_rate;
avFrame->pts = mAudioFrameCount;
ret = av_frame_get_buffer(avFrame, 0);
if (ret < 0) {
qDebug() << "Couldn't allocate frame: " << avErrorToQString(ret);
break;
}
ret = av_audio_fifo_read(mAudioOutBuffer, (void**)avFrame->data, codecContext->frame_size);
if (ret < 0)
qDebug() << "Could not read from FIFO queue: " << avErrorToQString(ret);
else {
mAudioFrameCount += codecContext->frame_size;
mVideoWorker->queueAudio(avFrame);
framesAdded = true;
}
}
if (framesAdded)
mVideoWorker->mWaitCondition.wakeAll();
}
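Because the pts bookkeeping above is easy to get wrong, a quick worked check with the values this encoder uses (frame_size 1024 is the usual AAC value, though the codec decides):

    // Worked example (assumed: sample_rate 44100, AAC frame_size 1024).
    // The audio time_base is {1, 44100}, so pts is counted in samples:
    //   frame n has pts = n * 1024
    //   frame duration  = 1024 / 44100 s, i.e. about 23.2 ms
    // mAudioFrameCount implements exactly this counter, and
    // av_packet_rescale_ts() later maps the ticks onto the stream time base.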
void UBFFmpegVideoEncoder::finishEncoding()
{
qDebug() << "VideoEncoder::finishEncoding called";
@@ -264,7 +443,7 @@ void UBFFmpegVideoEncoder::finishEncoding()
do {
// TODO: get rid of duplicated code (videoWorker does almost exactly this during encoding)
AVPacket* packet = mVideoWorker->mPacket;
AVPacket* packet = mVideoWorker->mVideoPacket;
if (avcodec_encode_video2(mVideoStream->codec, packet, NULL, &gotOutput) < 0) {
setLastErrorMessage("Couldn't encode frame to video");
@@ -272,9 +451,9 @@
}
if (gotOutput) {
AVRational codecTimebase = mVideoStream->codec->time_base;
AVRational streamTimebase = mVideoStream->time_base;
AVRational streamVideoTimebase = mVideoStream->time_base;
av_packet_rescale_ts(packet, codecTimebase, streamTimebase);
av_packet_rescale_ts(packet, codecTimebase, streamVideoTimebase);
packet->stream_index = mVideoStream->index;
av_interleaved_write_frame(mOutputFormatContext, packet);
@@ -282,16 +461,48 @@
}
} while (gotOutput);
if (mShouldRecordAudio) {
int gotOutput, ret;
do {
AVPacket* packet = mVideoWorker->mAudioPacket;
ret = avcodec_encode_audio2(mAudioStream->codec, packet, NULL, &gotOutput);
if (ret < 0)
setLastErrorMessage("Couldn't encode frame to audio");
else if (gotOutput) {
AVRational codecTimebase = mAudioStream->codec->time_base;
AVRational streamVideoTimebase = mAudioStream->time_base;
av_packet_rescale_ts(packet, codecTimebase, streamVideoTimebase);
packet->stream_index = mAudioStream->index;
av_interleaved_write_frame(mOutputFormatContext, packet);
av_packet_unref(packet);
}
} while (gotOutput);
}
av_write_trailer(mOutputFormatContext);
avio_close(mOutputFormatContext->pb);
avcodec_close(mVideoStream->codec);
sws_freeContext(mSwsContext);
if (mShouldRecordAudio) {
avcodec_close(mAudioStream->codec);
swr_free(&mSwrContext);
}
avformat_free_context(mOutputFormatContext);
emit encodingFinished(true);
}
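One FFmpeg API detail worth recalling for the flush loops above: a NULL frame switches the encoder into drain mode.

    // Flush pattern recap (FFmpeg API behavior): passing NULL instead of a
    // frame asks avcodec_encode_video2()/avcodec_encode_audio2() to emit
    // packets the codec is still buffering (lookahead, AAC encoder delay).
    // gotOutput stays 1 while packets keep coming and drops to 0 once the
    // codec is drained -- hence the do/while loops.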
//-------------------------------------------------------------------------
// Worker
//-------------------------------------------------------------------------
@@ -301,7 +512,8 @@ UBFFmpegVideoEncoderWorker::UBFFmpegVideoEncoderWorker(UBFFmpegVideoEncoder* con
{
mStopRequested = false;
mIsRunning = false;
mPacket = new AVPacket();
mVideoPacket = new AVPacket();
mAudioPacket = new AVPacket();
}
UBFFmpegVideoEncoderWorker::~UBFFmpegVideoEncoderWorker()
@@ -316,10 +528,22 @@ void UBFFmpegVideoEncoderWorker::stopEncoding()
void UBFFmpegVideoEncoderWorker::queueFrame(AVFrame* frame)
{
if (frame) {
mFrameQueueMutex.lock();
mImageQueue.enqueue(frame);
mFrameQueueMutex.unlock();
}
}
void UBFFmpegVideoEncoderWorker::queueAudio(AVFrame* frame)
{
if (frame) {
mFrameQueueMutex.lock();
mFrameQueue.enqueue(frame);
mAudioQueue.enqueue(frame);
mFrameQueueMutex.unlock();
}
}
/**
* The main encoding function. Takes the queued image frames and
@@ -333,15 +557,13 @@ void UBFFmpegVideoEncoderWorker::runEncoding()
mFrameQueueMutex.lock();
mWaitCondition.wait(&mFrameQueueMutex);
while (!mFrameQueue.isEmpty()) {
while (!mImageQueue.isEmpty()) {
writeLatestVideoFrame();
}
/*
while (!mAudioQueue.isEmpty()) {
writeLatestAudioFrame();
}
*/
mFrameQueueMutex.unlock();
}
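Condensed, the handshake between the capture side and the worker is a standard wait-condition pattern (names from this file):

    // Producer (capture side, e.g. processAudio()):
    mFrameQueueMutex.lock();
    mAudioQueue.enqueue(frame);         // or mImageQueue for video frames
    mFrameQueueMutex.unlock();
    mWaitCondition.wakeAll();           // rouse the worker

    // Consumer (runEncoding(), on mVideoEncoderThread):
    mFrameQueueMutex.lock();
    mWaitCondition.wait(&mFrameQueueMutex);  // unlocks while waiting
    // ...drain mImageQueue and mAudioQueue under the lock...
    mFrameQueueMutex.unlock();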
@@ -351,31 +573,31 @@
void UBFFmpegVideoEncoderWorker::writeLatestVideoFrame()
{
AVFrame* frame = mFrameQueue.dequeue();
AVFrame* frame = mImageQueue.dequeue();
int gotOutput;
av_init_packet(mPacket);
mPacket->data = NULL;
mPacket->size = 0;
av_init_packet(mVideoPacket);
mVideoPacket->data = NULL;
mVideoPacket->size = 0;
// qDebug() << "Encoding frame to video. Pts: " << frame->pts << "/" << mController->mTimebase;
// qDebug() << "Encoding frame to video. Pts: " << frame->pts << "/" << mController->mVideoTimebase;
if (avcodec_encode_video2(mController->mVideoStream->codec, mPacket, frame, &gotOutput) < 0)
emit error("Error encoding frame to video");
if (avcodec_encode_video2(mController->mVideoStream->codec, mVideoPacket, frame, &gotOutput) < 0)
emit error("Error encoding video frame");
if (gotOutput) {
AVRational codecTimebase = mController->mVideoStream->codec->time_base;
AVRational streamTimebase = mController->mVideoStream->time_base;
AVRational streamVideoTimebase = mController->mVideoStream->time_base;
// recalculate the timestamp to match the stream's timebase
av_packet_rescale_ts(mPacket, codecTimebase, streamTimebase);
mPacket->stream_index = mController->mVideoStream->index;
av_packet_rescale_ts(mVideoPacket, codecTimebase, streamVideoTimebase);
mVideoPacket->stream_index = mController->mVideoStream->index;
// qDebug() << "Writing encoded packet to file; pts: " << mPacket->pts << "/" << streamTimebase.den;
// qDebug() << "Writing encoded packet to file; pts: " << mVideoPacket->pts << "/" << streamVideoTimebase.den;
av_interleaved_write_frame(mController->mOutputFormatContext, mPacket);
av_packet_unref(mPacket);
av_interleaved_write_frame(mController->mOutputFormatContext, mVideoPacket);
av_packet_unref(mVideoPacket);
}
// Duct-tape solution. I assume there's a better way of doing this, but:
@@ -387,10 +609,42 @@ void UBFFmpegVideoEncoderWorker::writeLatestVideoFrame()
if (firstRun) {
firstRun = false;
frame->pts += 1;
mFrameQueue.enqueue(frame); // only works when the queue is empty at this point. todo: clean this up!
mImageQueue.enqueue(frame); // only works when the queue is empty at this point. todo: clean this up!
}
else
// free the frame
av_frame_free(&frame);
}
void UBFFmpegVideoEncoderWorker::writeLatestAudioFrame()
{
AVFrame *frame = mAudioQueue.dequeue();
int gotOutput, ret;
av_init_packet(mAudioPacket);
mAudioPacket->data = NULL;
mAudioPacket->size = 0;
//qDebug() << "Encoding audio frame";
ret = avcodec_encode_audio2(mController->mAudioStream->codec, mAudioPacket, frame, &gotOutput);
if (ret < 0)
emit error(QString("Error encoding audio frame: ") + avErrorToQString(ret));
else if (gotOutput) {
//qDebug() << "Writing audio frame to stream";
AVRational codecTimebase = mController->mAudioStream->codec->time_base;
AVRational streamVideoTimebase = mController->mAudioStream->time_base;
av_packet_rescale_ts(mAudioPacket, codecTimebase, streamVideoTimebase);
mAudioPacket->stream_index = mController->mAudioStream->index;
av_interleaved_write_frame(mController->mOutputFormatContext, mAudioPacket);
av_packet_unref(mAudioPacket);
}
av_frame_free(&frame);
}

src/podcast/ffmpeg/UBFFmpegVideoEncoder.h
@@ -5,20 +5,23 @@ extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/audio_fifo.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/time.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}
#include <atomic>
#include <stdio.h>
#include <QtCore>
#include <QImage>
#include "podcast/UBAbstractVideoEncoder.h"
#include "podcast/ffmpeg/UBMicrophoneInput.h"
class UBFFmpegVideoEncoderWorker;
class UBPodcastController;
@@ -45,7 +48,6 @@ public:
void setRecordAudio(bool pRecordAudio) { mShouldRecordAudio = pRecordAudio; }
signals:
void encodingFinished(bool ok);
@@ -53,6 +55,7 @@ signals:
private slots:
void setLastErrorMessage(const QString& pMessage);
void onAudioAvailable(QByteArray data);
void finishEncoding();
private:
@@ -63,32 +66,42 @@ private:
long timestamp; // unit: ms
};
AVFrame* convertFrame(ImageFrame frame);
AVFrame* convertImageFrame(ImageFrame frame);
AVFrame* convertAudio(QByteArray data);
void processAudio(QByteArray& data);
bool init();
// Queue for any pixmap that might be sent before the encoder is ready
QQueue<ImageFrame> mPendingFrames;
QString mLastErrorMessage;
bool mShouldRecordAudio;
QThread* mVideoEncoderThread;
UBFFmpegVideoEncoderWorker* mVideoWorker;
// Muxer
// ------------------------------------------
AVFormatContext* mOutputFormatContext;
int mTimebase;
AVStream* mVideoStream;
AVStream* mAudioStream;
// Video
AVStream* mVideoStream;
// ------------------------------------------
QQueue<ImageFrame> mPendingFrames;
struct SwsContext * mSwsContext;
// Audio
AVStream* mAudioStream;
int mVideoTimebase;
// Audio
// ------------------------------------------
bool mShouldRecordAudio;
FILE * mFile;
UBMicrophoneInput * mAudioInput;
struct SwrContext * mSwrContext;
/// Queue for audio that has been rescaled/converted but not encoded yet
AVAudioFifo *mAudioOutBuffer;
/// Sample rate for encoded audio
int mAudioSampleRate;
/// Total audio frames sent to encoder
int mAudioFrameCount;
};
@@ -105,6 +118,7 @@ public:
bool isRunning() { return mIsRunning; }
void queueFrame(AVFrame* frame);
void queueAudio(AVFrame *frame);
public slots:
void runEncoding();
@@ -117,19 +131,23 @@ signals:
private:
void writeLatestVideoFrame();
void writeLatestAudioFrame();
UBFFmpegVideoEncoder* mController;
// std::atomic is C++11. This won't work with msvc2010, so a
// newer compiler must be used if this is to be used on Windows
// newer compiler must be used if this class is to be used on Windows
std::atomic<bool> mStopRequested;
std::atomic<bool> mIsRunning;
QQueue<AVFrame*> mFrameQueue;
QQueue<AVFrame*> mImageQueue;
QQueue<AVFrame*> mAudioQueue;
QMutex mFrameQueueMutex;
QWaitCondition mWaitCondition;
AVPacket* mPacket;
AVPacket* mVideoPacket;
AVPacket* mAudioPacket;
};
#endif // UBFFMPEGVIDEOENCODER_H

src/podcast/ffmpeg/UBMicrophoneInput.cpp (new file)
@@ -0,0 +1,211 @@
#include "UBMicrophoneInput.h"
UBMicrophoneInput::UBMicrophoneInput()
: mAudioInput(NULL)
, mIODevice(NULL)
, mSeekPos(0)
{
}
UBMicrophoneInput::~UBMicrophoneInput()
{
if (mAudioInput)
delete mAudioInput;
}
int UBMicrophoneInput::channelCount()
{
return mAudioFormat.channelCount();
}
int UBMicrophoneInput::sampleRate()
{
return mAudioFormat.sampleRate();
}
/** Return the sample size in bits */
int UBMicrophoneInput::sampleSize()
{
return mAudioFormat.sampleSize();
}
/** Return the sample format in FFmpeg style (AVSampleFormat enum) */
int UBMicrophoneInput::sampleFormat()
{
enum AVSampleFormat {
AV_SAMPLE_FMT_NONE = -1,
AV_SAMPLE_FMT_U8,
AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_S32,
AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_DBL,
AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_DBLP,
AV_SAMPLE_FMT_NB
};
int sampleSize = mAudioFormat.sampleSize();
QAudioFormat::SampleType sampleType = mAudioFormat.sampleType();
// qDebug() << "Input sample format: " << sampleSize << "bits " << sampleType;
switch (sampleType) {
case QAudioFormat::Unknown:
return AV_SAMPLE_FMT_NONE;
case QAudioFormat::SignedInt:
if (sampleSize == 16)
return AV_SAMPLE_FMT_S16;
if (sampleSize == 32)
return AV_SAMPLE_FMT_S32;
return AV_SAMPLE_FMT_NONE; // unsupported size; don't fall through
case QAudioFormat::UnSignedInt:
if (sampleSize == 8)
return AV_SAMPLE_FMT_U8;
return AV_SAMPLE_FMT_NONE; // unsupported size; don't fall through
case QAudioFormat::Float:
return AV_SAMPLE_FMT_FLT;
default:
return AV_SAMPLE_FMT_NONE;
}
}
QString UBMicrophoneInput::codec()
{
return mAudioFormat.codec();
}
qint64 UBMicrophoneInput::processUSecs() const
{
return mAudioInput->processedUSecs();
}
bool UBMicrophoneInput::init()
{
if (mAudioDeviceInfo.isNull()) {
qWarning("No audio input device selected; using default");
mAudioDeviceInfo = QAudioDeviceInfo::defaultInputDevice();
}
qDebug() << "Input device name: " << mAudioDeviceInfo.deviceName();
mAudioFormat = mAudioDeviceInfo.preferredFormat();
mAudioInput = new QAudioInput(mAudioDeviceInfo, mAudioFormat, NULL);
//mAudioInput->setNotifyInterval(100);
connect(mAudioInput, SIGNAL(stateChanged(QAudio::State)),
this, SLOT(onAudioInputStateChanged(QAudio::State)));
return true;
}
void UBMicrophoneInput::start()
{
qDebug() << "starting audio input";
mIODevice = mAudioInput->start();
connect(mIODevice, SIGNAL(readyRead()),
this, SLOT(onDataReady()));
if (mAudioInput->error() == QAudio::OpenError)
qWarning() << "Error opening audio input";
}
void UBMicrophoneInput::stop()
{
mAudioInput->stop();
}
QStringList UBMicrophoneInput::availableDevicesNames()
{
QStringList names;
QList<QAudioDeviceInfo> devices = QAudioDeviceInfo::availableDevices(QAudio::AudioInput);
foreach (QAudioDeviceInfo device, devices) {
names.push_back(device.deviceName());
}
return names;
}
void UBMicrophoneInput::setInputDevice(QString name)
{
if (name.isEmpty()) {
mAudioDeviceInfo = QAudioDeviceInfo::defaultInputDevice();
return;
}
QList<QAudioDeviceInfo> devices = QAudioDeviceInfo::availableDevices(QAudio::AudioInput);
bool found = false;
foreach (QAudioDeviceInfo device, devices) {
if (device.deviceName() == name) {
mAudioDeviceInfo = device;
found = true;
break;
}
}
if (!found) {
qWarning() << "Audio input device not found; using default instead";
mAudioDeviceInfo = QAudioDeviceInfo::defaultInputDevice();
}
}
void UBMicrophoneInput::onDataReady()
{
int numBytes = mAudioInput->bytesReady();
if (numBytes > 0)
emit dataAvailable(mIODevice->read(numBytes));
}
void UBMicrophoneInput::onAudioInputStateChanged(QAudio::State state)
{
qDebug() << "Audio input state changed to " << state;
switch (state) {
case QAudio::StoppedState:
if (mAudioInput->error() != QAudio::NoError) {
emit error(getErrorString(mAudioInput->error()));
}
break;
// handle other states?
default:
break;
}
}
/**
* @brief Return a meaningful error string based on QAudio error codes
*/
QString UBMicrophoneInput::getErrorString(QAudio::Error errorCode)
{
switch (errorCode) {
case QAudio::NoError :
return "";
case QAudio::OpenError :
return "Couldn't open the audio device";
case QAudio::IOError :
return "Error reading from audio device";
case QAudio::UnderrunError :
return "Underrun error";
case QAudio::FatalError :
return "Fatal error; audio device unusable";
}
return "";
}
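Finally, a minimal stand-alone sketch of how this class is meant to be driven (hypothetical; in this commit the wiring is done by UBFFmpegVideoEncoder's constructor):

    // Hypothetical stand-alone capture sketch -- not part of this commit.
    UBMicrophoneInput* input = new UBMicrophoneInput();
    input->setInputDevice();        // empty name selects the default device
    if (input->init()) {
        QObject::connect(input, &UBMicrophoneInput::dataAvailable,
                         [](QByteArray data) {
                             // raw PCM in the device's preferred format
                             qDebug() << "captured" << data.size() << "bytes";
                         });
        input->start();             // begins emitting dataAvailable()
    }
    // ...later:
    input->stop();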

src/podcast/ffmpeg/UBMicrophoneInput.h (new file)
@@ -0,0 +1,57 @@
#ifndef UBMICROPHONEINPUT_H
#define UBMICROPHONEINPUT_H
#include <QtCore>
#include <QAudioInput>
/**
* @brief The UBMicrophoneInput class captures uncompressed sound from a microphone
*/
class UBMicrophoneInput : public QObject
{
Q_OBJECT
public:
UBMicrophoneInput();
virtual ~UBMicrophoneInput();
bool init();
void start();
void stop();
static QStringList availableDevicesNames();
void setInputDevice(QString name = "");
int channelCount();
int sampleRate();
int sampleSize();
int sampleFormat();
QString codec();
qint64 processUSecs() const;
signals:
/// Send the new volume, between 0 and 255
void audioLevelChanged(quint8 level);
/// Emitted when new audio data is available
void dataAvailable(QByteArray data);
void error(QString message);
private slots:
void onAudioInputStateChanged(QAudio::State state);
void onDataReady();
private:
QString getErrorString(QAudio::Error errorCode);
QAudioInput* mAudioInput;
QIODevice * mIODevice;
QAudioDeviceInfo mAudioDeviceInfo;
QAudioFormat mAudioFormat;
qint64 mSeekPos;
};
#endif // UBMICROPHONEINPUT_H

src/podcast/podcast.pri
@@ -3,13 +3,15 @@ HEADERS += src/podcast/UBPodcastController.h \
src/podcast/UBAbstractVideoEncoder.h \
src/podcast/UBPodcastRecordingPalette.h \
src/podcast/youtube/UBYouTubePublisher.h \
src/podcast/intranet/UBIntranetPodcastPublisher.h
src/podcast/intranet/UBIntranetPodcastPublisher.h \
$$PWD/ffmpeg/UBMicrophoneInput.h
SOURCES += src/podcast/UBPodcastController.cpp \
src/podcast/UBAbstractVideoEncoder.cpp \
src/podcast/UBPodcastRecordingPalette.cpp \
src/podcast/youtube/UBYouTubePublisher.cpp \
src/podcast/intranet/UBIntranetPodcastPublisher.cpp
src/podcast/intranet/UBIntranetPodcastPublisher.cpp \
$$PWD/ffmpeg/UBMicrophoneInput.cpp
win32 {
