Commit 9736badc by ziyue

Optimize codec-related code

parent 0739b1dd
@@ -84,7 +84,7 @@ int main(int argc, char *argv[]) {
         });
     });
     auto delegate = std::make_shared<FrameWriterInterfaceHelper>([decoder](const Frame::Ptr &frame) {
-        return decoder->inputFrame(frame, false);
+        return decoder->inputFrame(frame, false, true);
     });
     videoTrack->addDelegate(delegate);
 }
@@ -106,7 +106,7 @@ int main(int argc, char *argv[]) {
         audio_player->playPCM((const char *) (pcm->get()->data[0]), MIN(len, frame->get()->linesize[0]));
     });
     auto audio_delegate = std::make_shared<FrameWriterInterfaceHelper>([decoder](const Frame::Ptr &frame) {
-        return decoder->inputFrame(frame, false);
+        return decoder->inputFrame(frame, false, true);
     });
     audioTrack->addDelegate(audio_delegate);
 }
......
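The two booleans in the updated calls map onto the decoder's reworked entry point, declared later in this commit as inputFrame(const Frame::Ptr &frame, bool live, bool async, bool enable_merge = true). A minimal sketch of the same wiring with the flags spelled out (names taken from the diff above):

    // live = false: this is a test/pull source, so the "drop data older than
    // MAX_DELAY_SECOND" safeguard for live streams stays off.
    // async = true: frames are queued onto the internal decoder thread when available.
    auto delegate = std::make_shared<FrameWriterInterfaceHelper>([decoder](const Frame::Ptr &frame) {
        return decoder->inputFrame(frame, false /*live*/, true /*async*/);
    });
    videoTrack->addDelegate(delegate);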
@@ -9,17 +9,14 @@
  */
 #ifdef ENABLE_X264
-
 #include "H264Encoder.h"
-
 #include "Util/TimeTicker.h"
 using namespace toolkit;
 namespace mediakit {
-H264Encoder::H264Encoder() {
-}
+H264Encoder::H264Encoder() {}
 H264Encoder::~H264Encoder() {
     //* clear the picture area
@@ -39,7 +36,6 @@ H264Encoder::~H264Encoder() {
     }
 }
-
 /*typedef struct x264_param_t
 {
     CPU flags
@@ -212,7 +208,7 @@ the value of Value is the fps.
     void (*param_free)( void* );
 } x264_param_t;*/
-bool H264Encoder::init(int iWidth, int iHeight, int iFps) {
+bool H264Encoder::init(int iWidth, int iHeight, int iFps, int iBitRate) {
     if (_pX264Handle) {
         return true;
     }
@@ -230,7 +226,7 @@ bool H264Encoder::init(int iWidth, int iHeight, int iFps) {
     pX264Param->i_keyint_max = iFps * 3; //ffmpeg:gop_size, maximum keyframe interval
     pX264Param->i_keyint_min = iFps * 1; //ffmpeg:keyint_min, minimum keyframe interval
     //* Rate control Parameters
-    pX264Param->rc.i_bitrate = 5000; //* bitrate (in Kbps)
+    pX264Param->rc.i_bitrate = iBitRate / 1000; //* bitrate (in Kbps)
     pX264Param->rc.i_qp_step = 1; //maximum QP step between consecutive frames; ffmpeg:max_qdiff
     pX264Param->rc.i_qp_min = 10; //ffmpeg:qmin; minimum quantizer, range 1-51, 10-30 recommended
     pX264Param->rc.i_qp_max = 41; //ffmpeg:qmax; maximum quantizer, range 1-51, 10-30 recommended
@@ -304,20 +300,19 @@ bool H264Encoder::init(int iWidth, int iHeight, int iFps) {
     return true;
 }
-int H264Encoder::inputData(char* apcYuv[3], int aiYuvLen[3], int64_t i64Pts, H264Frame** ppFrame) {
+int H264Encoder::inputData(char *yuv[3], int linesize[3], int64_t cts, H264Frame **out_frame) {
     //TimeTicker1(5);
-    _pPicIn->img.i_stride[0] = aiYuvLen[0];
-    _pPicIn->img.i_stride[1] = aiYuvLen[1];
-    _pPicIn->img.i_stride[2] = aiYuvLen[2];
-    _pPicIn->img.plane[0] = (uint8_t *) apcYuv[0];
-    _pPicIn->img.plane[1] = (uint8_t *) apcYuv[1];
-    _pPicIn->img.plane[2] = (uint8_t *) apcYuv[2];
-    _pPicIn->i_pts = i64Pts;
+    _pPicIn->img.i_stride[0] = linesize[0];
+    _pPicIn->img.i_stride[1] = linesize[1];
+    _pPicIn->img.i_stride[2] = linesize[2];
+    _pPicIn->img.plane[0] = (uint8_t *) yuv[0];
+    _pPicIn->img.plane[1] = (uint8_t *) yuv[1];
+    _pPicIn->img.plane[2] = (uint8_t *) yuv[2];
+    _pPicIn->i_pts = cts;
     int iNal;
-    x264_nal_t* pNals;
-    int iResult = x264_encoder_encode(_pX264Handle, &pNals, &iNal, _pPicIn,
-            _pPicOut);
+    x264_nal_t *pNals;
+    int iResult = x264_encoder_encode(_pX264Handle, &pNals, &iNal, _pPicIn, _pPicOut);
     if (iResult <= 0) {
         return 0;
     }
@@ -327,7 +322,7 @@ int H264Encoder::inputData(char* apcYuv[3], int aiYuvLen[3], int64_t i64Pts, H264Frame** ppFrame) {
         _aFrames[i].iLength = pNal.i_payload;
         _aFrames[i].pucData = pNal.p_payload;
     }
-    *ppFrame = _aFrames;
+    *out_frame = _aFrames;
     return iNal;
 }
......
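One unit detail worth noting: x264's rc.i_bitrate is expressed in Kbps, while the new iBitRate parameter arrives in bits per second (DevChannel defaults it to 2 * 1024 * 1024), hence the division by 1000:

    // Illustration only, using the DevChannel default:
    int iBitRate = 2 * 1024 * 1024;             // 2,097,152 bps
    pX264Param->rc.i_bitrate = iBitRate / 1000; // 2097 Kbps, the unit x264 expects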
@@ -7,13 +7,10 @@
  * LICENSE file in the root of the source tree. All contributing project authors
  * may be found in the AUTHORS file in the root of the source tree.
  */
 #ifndef CODEC_H264ENCODER_H_
 #define CODEC_H264ENCODER_H_
-
 #include <cstdint>
-
 #ifdef __cplusplus
 extern "C" {
 #endif //__cplusplus
@@ -32,14 +29,16 @@ public:
         uint8_t *pucData;
     } H264Frame;
-    H264Encoder(void);
-    virtual ~H264Encoder(void);
-    bool init(int iWidth, int iHeight, int iFps);
-    int inputData(char *apcYuv[3], int aiYuvLen[3], int64_t i64Pts, H264Frame **ppFrame);
+    H264Encoder();
+    ~H264Encoder();
+
+    bool init(int iWidth, int iHeight, int iFps, int iBitRate);
+    int inputData(char *yuv[3], int linesize[3], int64_t cts, H264Frame **out_frame);
 private:
-    x264_t* _pX264Handle = nullptr;
-    x264_picture_t* _pPicIn = nullptr;
-    x264_picture_t* _pPicOut = nullptr;
+    x264_t *_pX264Handle = nullptr;
+    x264_picture_t *_pPicIn = nullptr;
+    x264_picture_t *_pPicOut = nullptr;
     H264Frame _aFrames[10];
 };
......
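Putting the refreshed header together, the encoder is driven roughly as follows; a minimal sketch assuming an ENABLE_X264 build, where the yuv420p plane pointers, strides, cts_ms, and the consumeNal() sink are hypothetical stand-ins for the caller's capture pipeline:

    #include "H264Encoder.h"

    mediakit::H264Encoder encoder;
    if (encoder.init(1280, 720, 25, 2 * 1024 * 1024)) { // width, height, fps, bitrate in bps
        char *yuv[3] = {y_plane, u_plane, v_plane};     // hypothetical yuv420p planes
        int linesize[3] = {1280, 640, 640};             // strides for 1280x720 yuv420p
        mediakit::H264Encoder::H264Frame *out_frames = nullptr;
        int count = encoder.inputData(yuv, linesize, cts_ms, &out_frames);
        for (int i = 0; i < count; ++i) {
            // each entry is one NAL unit produced by x264
            consumeNal(out_frames[i].pucData, out_frames[i].iLength);
        }
    }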
@@ -9,13 +9,20 @@
  */
 #if defined(ENABLE_FFMPEG)
+#if !defined(_WIN32)
+#include <dlfcn.h>
+#endif
+#include "Util/File.h"
+#include "Util/uv_errno.h"
 #include "Transcode.h"
+#include "Extension/AAC.h"
 #define MAX_DELAY_SECOND 3
 using namespace std;
 using namespace toolkit;
-using namespace mediakit;
+
+namespace mediakit {
 static string ffmpeg_err(int errnum) {
     char errbuf[AV_ERROR_MAX_STRING_SIZE];
@@ -23,7 +30,7 @@ static string ffmpeg_err(int errnum) {
     return errbuf;
 }
-std::shared_ptr<AVPacket> alloc_av_packet(){
+std::shared_ptr<AVPacket> alloc_av_packet() {
     auto pkt = std::shared_ptr<AVPacket>(av_packet_alloc(), [](AVPacket *pkt) {
         av_packet_free(&pkt);
     });
@@ -33,35 +40,182 @@ std::shared_ptr<AVPacket> alloc_av_packet(){
 }
 //////////////////////////////////////////////////////////////////////////////////////////
-template<bool decoder = true, typename ...ARGS>
-const AVCodec *getCodec(ARGS ...names);
-
-template<bool decoder = true>
-const AVCodec *getCodec(const char *name) {
-    auto codec = decoder ? avcodec_find_decoder_by_name(name) : avcodec_find_encoder_by_name(name);
-    if (codec) {
-        InfoL << (decoder ? "got decoder:" : "got encoder:") << name;
-    }
-    return codec;
-}
-
-template<bool decoder = true>
-const AVCodec *getCodec(enum AVCodecID id) {
-    auto codec = decoder ? avcodec_find_decoder(id) : avcodec_find_encoder(id);
-    if (codec) {
-        InfoL << (decoder ? "got decoder:" : "got encoder:") << avcodec_get_name(id);
-    }
-    return codec;
-}
-
-template<bool decoder = true, typename First, typename ...ARGS>
-const AVCodec *getCodec(First first, ARGS ...names) {
-    auto codec = getCodec<decoder>(names...);
-    if (codec) {
-        return codec;
-    }
-    return getCodec<decoder>(first);
-}
+static void on_ffmpeg_log(void *ctx, int level, const char *fmt, va_list args) {
+    GET_CONFIG(bool, enable_ffmpeg_log, General::kEnableFFmpegLog);
+    if (!enable_ffmpeg_log) {
+        return;
+    }
+    LogLevel lev;
+    switch (level) {
+        case AV_LOG_FATAL: lev = LError; break;
+        case AV_LOG_ERROR: lev = LError; break;
+        case AV_LOG_WARNING: lev = LWarn; break;
+        case AV_LOG_INFO: lev = LInfo; break;
+        case AV_LOG_VERBOSE: lev = LDebug; break;
+        case AV_LOG_DEBUG: lev = LDebug; break;
+        case AV_LOG_TRACE: lev = LTrace; break;
+        default: lev = LTrace; break;
+    }
+    LoggerWrapper::printLogV(::toolkit::getLogger(), lev, __FILE__, ctx ? av_default_item_name(ctx) : "NULL", level, fmt, args);
+}
+
+static bool setupFFmpeg_l() {
+    av_log_set_level(AV_LOG_TRACE);
+    av_log_set_flags(AV_LOG_PRINT_LEVEL);
+    av_log_set_callback(on_ffmpeg_log);
+    avcodec_register_all();
+    return true;
+}
+
+static void setupFFmpeg() {
+    static auto flag = setupFFmpeg_l();
+}
+
+static bool checkIfSupportedNvidia_l() {
+#if !defined(_WIN32)
+    GET_CONFIG(bool, check_nvidia_dev, General::kCheckNvidiaDev);
+    if (!check_nvidia_dev) {
+        return false;
+    }
+    auto so = dlopen("libnvcuvid.so.1", RTLD_LAZY);
+    if (!so) {
+        WarnL << "failed to load libnvcuvid.so.1: " << get_uv_errmsg();
+        return false;
+    }
+    dlclose(so);
+    bool find_driver = false;
+    File::scanDir("/dev", [&](const string &path, bool is_dir) {
+        if (!is_dir && start_with(path, "/dev/nvidia")) {
+            //found the nvidia driver
+            find_driver = true;
+            return false;
+        }
+        return true;
+    }, false);
+    if (!find_driver) {
+        WarnL << "nvidia hardware codec driver files /dev/nvidia* do not exist";
+    }
+    return find_driver;
+#else
+    return false;
+#endif
+}
+
+static bool checkIfSupportedNvidia() {
+    static auto ret = checkIfSupportedNvidia_l();
+    return ret;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+bool TaskManager::addEncodeTask(function<void()> task) {
+    {
+        lock_guard<mutex> lck(_task_mtx);
+        _task.emplace_back(std::move(task));
+        if (_task.size() > _max_task) {
+            WarnL << "encoder thread has too many pending tasks, now dropping a frame!";
+            _task.pop_front();
+        }
+    }
+    _sem.post();
+    return true;
+}
+
+bool TaskManager::addDecodeTask(bool key_frame, function<void()> task) {
+    {
+        lock_guard<mutex> lck(_task_mtx);
+        if (_decode_drop_start) {
+            if (!key_frame) {
+                TraceL << "decode thread drop frame";
+                return false;
+            }
+            _decode_drop_start = false;
+            InfoL << "decode thread stop drop frame";
+        }
+        _task.emplace_back(std::move(task));
+        if (_task.size() > _max_task) {
+            _decode_drop_start = true;
+            WarnL << "decode thread start drop frame";
+        }
+    }
+    _sem.post();
+    return true;
+}
+
+void TaskManager::setMaxTaskSize(size_t size) {
+    CHECK(size >= 3 && size <= 1000, "async task size limited to 3 ~ 1000, now size is:", size);
+    _max_task = size;
+}
+
+void TaskManager::startThread(const string &name) {
+    _thread.reset(new thread([this, name]() {
+        onThreadRun(name);
+    }), [](thread *ptr) {
+        ptr->join();
+        delete ptr;
+    });
+}
+
+void TaskManager::stopThread(bool drop_task) {
+    TimeTicker();
+    if (!_thread) {
+        return;
+    }
+    {
+        lock_guard<mutex> lck(_task_mtx);
+        if (drop_task) {
+            _exit = true;
+            _task.clear();
+        }
+        _task.emplace_back([]() {
+            throw ThreadExitException();
+        });
+    }
+    _sem.post(10);
+    _thread = nullptr;
+}
+
+TaskManager::~TaskManager() {
+    stopThread(true);
+}
+
+bool TaskManager::isEnabled() const {
+    return _thread.operator bool();
+}
+
+void TaskManager::onThreadRun(const string &name) {
+    setThreadName(name.data());
+    function<void()> task;
+    _exit = false;
+    while (!_exit) {
+        _sem.wait();
+        {
+            unique_lock<mutex> lck(_task_mtx);
+            if (_task.empty()) {
+                continue;
+            }
+            task = _task.front();
+            _task.pop_front();
+        }
+        try {
+            TimeTicker2(50, TraceL);
+            task();
+            task = nullptr;
+        } catch (ThreadExitException &ex) {
+            break;
+        } catch (std::exception &ex) {
+            WarnL << ex.what();
+            continue;
+        } catch (...) {
+            WarnL << "caught an unknown exception";
+            throw;
+        }
+    }
+    InfoL << name << " exited!";
+}
 //////////////////////////////////////////////////////////////////////////////////////////
@@ -87,73 +241,68 @@ AVFrame *FFmpegFrame::get() const {
     return _frame.get();
 }
-////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-FFmpegSwr::FFmpegSwr(AVSampleFormat output, int channel, int channel_layout, int samplerate) {
-    _target_format = output;
-    _target_channels = channel;
-    _target_channel_layout = channel_layout;
-    _target_samplerate = samplerate;
-}
-
-FFmpegSwr::~FFmpegSwr() {
-    if (_ctx) {
-        swr_free(&_ctx);
-    }
-}
-
-FFmpegFrame::Ptr FFmpegSwr::inputFrame(const FFmpegFrame::Ptr &frame) {
-    if (frame->get()->format == _target_format &&
-        frame->get()->channels == _target_channels &&
-        frame->get()->channel_layout == (uint64_t) _target_channel_layout &&
-        frame->get()->sample_rate == _target_samplerate) {
-        //no conversion needed
-        return frame;
-    }
-    if (!_ctx) {
-        _ctx = swr_alloc_set_opts(nullptr, _target_channel_layout, _target_format, _target_samplerate,
-                                  frame->get()->channel_layout, (AVSampleFormat) frame->get()->format,
-                                  frame->get()->sample_rate, 0, nullptr);
-        InfoL << "swr_alloc_set_opts:" << av_get_sample_fmt_name((enum AVSampleFormat) frame->get()->format) << " -> "
-              << av_get_sample_fmt_name(_target_format);
-    }
-    if (_ctx) {
-        auto out = std::make_shared<FFmpegFrame>();
-        out->get()->format = _target_format;
-        out->get()->channel_layout = _target_channel_layout;
-        out->get()->channels = _target_channels;
-        out->get()->sample_rate = _target_samplerate;
-        out->get()->pkt_dts = frame->get()->pkt_dts;
-        out->get()->pts = frame->get()->pts;
-
-        int ret = 0;
-        if (0 != (ret = swr_convert_frame(_ctx, out->get(), frame->get()))) {
-            WarnL << "swr_convert_frame failed:" << ffmpeg_err(ret);
-            return nullptr;
-        }
-        return out;
-    }
-    return nullptr;
-}
-
-///////////////////////////////////////////////////////////////////////////
-
-FFmpegDecoder::FFmpegDecoder(const Track::Ptr &track) {
-#if (LIBAVCODEC_VERSION_MAJOR < 58)
-    avcodec_register_all();
-#endif
-    const AVCodec *codec = nullptr;
-    const AVCodec *codec_default = nullptr;
+void FFmpegFrame::fillPicture(AVPixelFormat target_format, int target_width, int target_height) {
+    assert(_data == nullptr);
+    _data = new char[avpicture_get_size(target_format, target_width, target_height)];
+    avpicture_fill((AVPicture *) _frame.get(), (uint8_t *) _data, target_format, target_width, target_height);
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+template<bool decoder = true, typename ...ARGS>
+AVCodec *getCodec(ARGS ...names);
+
+template<bool decoder = true>
+AVCodec *getCodec(const char *name) {
+    auto codec = decoder ? avcodec_find_decoder_by_name(name) : avcodec_find_encoder_by_name(name);
+    if (codec) {
+        InfoL << (decoder ? "got decoder:" : "got encoder:") << name;
+    } else {
+        TraceL << (decoder ? "decoder:" : "encoder:") << name << " not found";
+    }
+    return codec;
+}
+
+template<bool decoder = true>
+AVCodec *getCodec(enum AVCodecID id) {
+    auto codec = decoder ? avcodec_find_decoder(id) : avcodec_find_encoder(id);
+    if (codec) {
+        InfoL << (decoder ? "got decoder:" : "got encoder:") << avcodec_get_name(id);
+    } else {
+        TraceL << (decoder ? "decoder:" : "encoder:") << avcodec_get_name(id) << " not found";
+    }
+    return codec;
+}
+
+template<bool decoder = true, typename First, typename ...ARGS>
+AVCodec *getCodec(First first, ARGS ...names) {
+    auto codec = getCodec<decoder>(names...);
+    if (codec) {
+        return codec;
+    }
+    return getCodec<decoder>(first);
+}
+
+FFmpegDecoder::FFmpegDecoder(const Track::Ptr &track, int thread_num) {
+    setupFFmpeg();
+    AVCodec *codec = nullptr;
+    AVCodec *codec_default = nullptr;
     switch (track->getCodecId()) {
         case CodecH264:
             codec_default = getCodec(AV_CODEC_ID_H264);
-            codec = getCodec("libopenh264", AV_CODEC_ID_H264, "h264_videotoolbox", "h264_cuvid");
+            if (checkIfSupportedNvidia()) {
+                codec = getCodec("libopenh264", AV_CODEC_ID_H264, "h264_qsv", "h264_videotoolbox", "h264_cuvid", "h264_nvmpi");
+            } else {
+                codec = getCodec("libopenh264", AV_CODEC_ID_H264, "h264_qsv", "h264_videotoolbox", "h264_nvmpi");
+            }
             break;
         case CodecH265:
             codec_default = getCodec(AV_CODEC_ID_HEVC);
-            codec = getCodec(AV_CODEC_ID_HEVC, "hevc_videotoolbox", "hevc_cuvid");
+            if (checkIfSupportedNvidia()) {
+                codec = getCodec(AV_CODEC_ID_HEVC, "hevc_qsv", "hevc_videotoolbox", "hevc_cuvid", "hevc_nvmpi");
+            } else {
+                codec = getCodec(AV_CODEC_ID_HEVC, "hevc_qsv", "hevc_videotoolbox", "hevc_nvmpi");
+            }
             break;
         case CodecAAC:
             codec = getCodec(AV_CODEC_ID_AAC);
@@ -167,7 +316,14 @@ FFmpegDecoder::FFmpegDecoder(const Track::Ptr &track) {
         case CodecOpus:
             codec = getCodec(AV_CODEC_ID_OPUS);
             break;
-        default: break;
+        case CodecVP8:
+            codec = getCodec(AV_CODEC_ID_VP8);
+            break;
+        case CodecVP9:
+            codec = getCodec(AV_CODEC_ID_VP9);
+            break;
+        default:
+            break;
     }
     if (!codec) {
@@ -176,7 +332,6 @@ FFmpegDecoder::FFmpegDecoder(const Track::Ptr &track) {
     while (true) {
         _context.reset(avcodec_alloc_context3(codec), [](AVCodecContext *ctx) {
-            avcodec_close(ctx);
             avcodec_free_context(&ctx);
         });
@@ -185,11 +340,13 @@ FFmpegDecoder::FFmpegDecoder(const Track::Ptr &track) {
         }
         //keep a reference to the AVFrame
+#ifdef FF_API_OLD_ENCDEC
         _context->refcounted_frames = 1;
+#endif
         _context->flags |= AV_CODEC_FLAG_LOW_DELAY;
         _context->flags2 |= AV_CODEC_FLAG2_FAST;
+        if (track->getTrackType() == TrackVideo) {
+            _context->width = static_pointer_cast<VideoTrack>(track)->getVideoWidth();
+            _context->height = static_pointer_cast<VideoTrack>(track)->getVideoHeight();
+        }
         switch (track->getCodecId()) {
             case CodecG711A:
@@ -204,7 +361,11 @@ FFmpegDecoder::FFmpegDecoder(const Track::Ptr &track) {
                 break;
         }
         AVDictionary *dict = nullptr;
-        av_dict_set(&dict, "threads", "auto", 0);
+        if (thread_num <= 0) {
+            av_dict_set(&dict, "threads", "auto", 0);
+        } else {
+            av_dict_set(&dict, "threads", to_string(MIN(thread_num, thread::hardware_concurrency())).data(), 0);
+        }
         av_dict_set(&dict, "zerolatency", "1", 0);
         av_dict_set(&dict, "strict", "-2", 0);
@@ -232,21 +393,27 @@ FFmpegDecoder::FFmpegDecoder(const Track::Ptr &track) {
        }
        throw std::runtime_error(StrPrinter << "failed to open decoder " << codec->name << ": " << ffmpeg_err(ret));
    }
+    if (track->getTrackType() == TrackVideo) {
+        startThread("decoder thread");
+    }
 }
 FFmpegDecoder::~FFmpegDecoder() {
-    stopThread();
+    stopThread(true);
+    if (_do_merger) {
+        _merger.inputFrame(nullptr, [&](uint32_t dts, uint32_t pts, const Buffer::Ptr &buffer, bool have_idr) {
+            decodeFrame(buffer->data(), buffer->size(), dts, pts, false);
+        });
+    }
+    flush();
 }
 void FFmpegDecoder::flush() {
     while (true) {
         auto out_frame = std::make_shared<FFmpegFrame>();
         auto ret = avcodec_receive_frame(_context.get(), out_frame->get());
-        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+        if (ret == AVERROR(EAGAIN)) {
+            avcodec_send_packet(_context.get(), nullptr);
+            continue;
+        }
+        if (ret == AVERROR_EOF) {
             break;
         }
         if (ret < 0) {
@@ -261,29 +428,35 @@ const AVCodecContext *FFmpegDecoder::getContext() const {
     return _context.get();
 }
-bool FFmpegDecoder::inputFrame_l(const Frame::Ptr &frame) {
-    if (_do_merger) {
+bool FFmpegDecoder::inputFrame_l(const Frame::Ptr &frame, bool live, bool enable_merge) {
+    if (_do_merger && enable_merge) {
         return _merger.inputFrame(frame, [&](uint32_t dts, uint32_t pts, const Buffer::Ptr &buffer, bool have_idr) {
-            decodeFrame(buffer->data(), buffer->size(), dts, pts);
+            decodeFrame(buffer->data(), buffer->size(), dts, pts, live);
         });
     }
-    return decodeFrame(frame->data(), frame->size(), frame->dts(), frame->pts());
+
+    return decodeFrame(frame->data(), frame->size(), frame->dts(), frame->pts(), live);
 }
-bool FFmpegDecoder::inputFrame(const Frame::Ptr &frame, bool may_async) {
-    if (!may_async || !TaskManager::isEnabled()) {
-        return inputFrame_l(frame);
+bool FFmpegDecoder::inputFrame(const Frame::Ptr &frame, bool live, bool async, bool enable_merge) {
+    if (async && !TaskManager::isEnabled() && getContext()->codec_type == AVMEDIA_TYPE_VIDEO) {
+        //async decoding requested and this is video, so try to start the async decode thread
+        startThread("decoder thread");
+    }
+
+    if (!async || !TaskManager::isEnabled()) {
+        return inputFrame_l(frame, live, enable_merge);
     }
     auto frame_cache = Frame::getCacheAbleFrame(frame);
-    addDecodeTask(frame->keyFrame(), [this, frame_cache]() {
-        inputFrame_l(frame_cache);
+    return addDecodeTask(frame->keyFrame(), [this, live, frame_cache, enable_merge]() {
+        inputFrame_l(frame_cache, live, enable_merge);
         //simulate active frame dropping caused by slow decoding here
         //usleep(100 * 1000);
     });
-    return true;
 }
-bool FFmpegDecoder::decodeFrame(const char *data, size_t size, uint32_t dts, uint32_t pts) {
+bool FFmpegDecoder::decodeFrame(const char *data, size_t size, uint32_t dts, uint32_t pts, bool live) {
     TimeTicker2(30, TraceL);
     auto pkt = alloc_av_packet();
@@ -310,7 +483,7 @@ bool FFmpegDecoder::decodeFrame(const char *data, size_t size, uint32_t dts, uint32_t pts) {
             WarnL << "avcodec_receive_frame failed:" << ffmpeg_err(ret);
             break;
         }
-        if (pts - out_frame->get()->pts > MAX_DELAY_SECOND * 1000 && _ticker.createdTime() > 10 * 1000) {
+        if (live && pts - out_frame->get()->pts > MAX_DELAY_SECOND * 1000 && _ticker.createdTime() > 10 * 1000) {
            //only ignore later frames, so that the Track can still become ready
            WarnL << "while decoding, ignoring data older than " << MAX_DELAY_SECOND << " seconds: " << pts << " " << out_frame->get()->pts;
            continue;
@@ -330,102 +503,139 @@ void FFmpegDecoder::onDecode(const FFmpegFrame::Ptr &frame) {
     }
 }
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////
-void TaskManager::pushExit(){
-    {
-        lock_guard<mutex> lck(_task_mtx);
-        _exit = true;
-        _task.clear();
-        _task.emplace_back([](){
-            throw ThreadExitException();
-        });
-    }
-    _sem.post(10);
-}
-
-void TaskManager::addEncodeTask(function<void()> task) {
-    {
-        lock_guard<mutex> lck(_task_mtx);
-        _task.emplace_back(std::move(task));
-        if (_task.size() > 30) {
-            WarnL << "encoder thread has too many pending tasks, now dropping a frame!";
-            _task.pop_front();
-        }
-    }
-    _sem.post();
-}
-
-void TaskManager::addDecodeTask(bool key_frame, function<void()> task) {
-    {
-        lock_guard<mutex> lck(_task_mtx);
-        if (_decode_drop_start) {
-            if (!key_frame) {
-                TraceL << "decode thread drop frame";
-                return;
-            }
-            _decode_drop_start = false;
-            InfoL << "decode thread stop drop frame";
-        }
-        _task.emplace_back(std::move(task));
-        if (_task.size() > 30) {
-            _decode_drop_start = true;
-            WarnL << "decode thread start drop frame";
-        }
-    }
-    _sem.post();
-}
-
-void TaskManager::startThread(const string &name) {
-    _thread.reset(new thread([this, name]() {
-        onThreadRun(name);
-    }), [this](thread *ptr) {
-        pushExit();
-        ptr->join();
-        delete ptr;
-    });
-}
-
-void TaskManager::stopThread() {
-    _thread = nullptr;
-}
-
-TaskManager::~TaskManager() {
-    stopThread();
-}
-
-bool TaskManager::isEnabled() const {
-    return _thread.operator bool();
-}
-
-void TaskManager::onThreadRun(const string &name) {
-    setThreadName(name.data());
-    function<void()> task;
-    _exit = false;
-    while (!_exit) {
-        _sem.wait();
-        {
-            unique_lock<mutex> lck(_task_mtx);
-            if (_task.empty()) {
-                continue;
-            }
-            task = _task.front();
-            _task.pop_front();
-        }
-
-        try {
-            TimeTicker2(50, TraceL);
-            task();
-            task = nullptr;
-        } catch (ThreadExitException &ex) {
-            break;
-        } catch (std::exception &ex) {
-            WarnL << ex.what();
-            continue;
-        }
-    }
-    InfoL << name << " exited!";
-}
+FFmpegSwr::FFmpegSwr(AVSampleFormat output, int channel, int channel_layout, int samplerate) {
+    _target_format = output;
+    _target_channels = channel;
+    _target_channel_layout = channel_layout;
+    _target_samplerate = samplerate;
+}
+
+FFmpegSwr::~FFmpegSwr() {
+    if (_ctx) {
+        swr_free(&_ctx);
+    }
+}
+
+FFmpegFrame::Ptr FFmpegSwr::inputFrame(const FFmpegFrame::Ptr &frame) {
+    if (frame->get()->format == _target_format &&
+        frame->get()->channels == _target_channels &&
+        frame->get()->channel_layout == _target_channel_layout &&
+        frame->get()->sample_rate == _target_samplerate) {
+        //no conversion needed
+        return frame;
+    }
+    if (!_ctx) {
+        _ctx = swr_alloc_set_opts(nullptr, _target_channel_layout, _target_format, _target_samplerate,
+                                  frame->get()->channel_layout, (AVSampleFormat) frame->get()->format,
+                                  frame->get()->sample_rate, 0, nullptr);
+        InfoL << "swr_alloc_set_opts:" << av_get_sample_fmt_name((enum AVSampleFormat) frame->get()->format) << " -> "
+              << av_get_sample_fmt_name(_target_format);
+    }
+    if (_ctx) {
+        auto out = std::make_shared<FFmpegFrame>();
+        out->get()->format = _target_format;
+        out->get()->channel_layout = _target_channel_layout;
+        out->get()->channels = _target_channels;
+        out->get()->sample_rate = _target_samplerate;
+        out->get()->pkt_dts = frame->get()->pkt_dts;
+        out->get()->pts = frame->get()->pts;
+
+        int ret = 0;
+        if (0 != (ret = swr_convert_frame(_ctx, out->get(), frame->get()))) {
+            WarnL << "swr_convert_frame failed:" << ffmpeg_err(ret);
+            return nullptr;
+        }
+        return out;
+    }
+
+    return nullptr;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+FFmpegSws::FFmpegSws(AVPixelFormat output, int width, int height) {
+    _target_format = output;
+    _target_width = width;
+    _target_height = height;
+}
+
+FFmpegSws::~FFmpegSws() {
+    if (_ctx) {
+        sws_freeContext(_ctx);
+        _ctx = nullptr;
+    }
+}
+
+int FFmpegSws::inputFrame(const FFmpegFrame::Ptr &frame, uint8_t *data) {
+    TimeTicker2(30, TraceL);
+    if (!_target_width) {
+        _target_width = frame->get()->width;
+    }
+    if (!_target_height) {
+        _target_height = frame->get()->height;
+    }
+    AVFrame dst;
+    memset(&dst, 0, sizeof(dst));
+    avpicture_fill((AVPicture *) &dst, data, _target_format, _target_width, _target_height);
+
+    if (!_ctx) {
+        _ctx = sws_getContext(frame->get()->width, frame->get()->height, (enum AVPixelFormat) frame->get()->format,
+                              _target_width, _target_height, _target_format, SWS_FAST_BILINEAR, NULL, NULL, NULL);
+        InfoL << "sws_getContext:" << av_get_pix_fmt_name((enum AVPixelFormat) frame->get()->format) << " -> "
+              << av_get_pix_fmt_name(_target_format);
+    }
+    assert(_ctx);
+    int ret = 0;
+    if (0 >= (ret = sws_scale(_ctx, frame->get()->data, frame->get()->linesize, 0, frame->get()->height, dst.data,
+                              dst.linesize))) {
+        WarnL << "sws_scale failed:" << ffmpeg_err(ret);
+    }
+    return ret;
+}
+
+FFmpegFrame::Ptr FFmpegSws::inputFrame(const FFmpegFrame::Ptr &frame) {
+    TimeTicker2(30, TraceL);
+
+    if (!_target_width) {
+        _target_width = frame->get()->width;
+    }
+    if (!_target_height) {
+        _target_height = frame->get()->height;
+    }
+    if (frame->get()->format == _target_format && frame->get()->width == _target_width
+        && frame->get()->height == _target_height) {
+        //no conversion needed
+        return frame;
+    }
+    if (!_ctx) {
+        _ctx = sws_getContext(frame->get()->width, frame->get()->height, (enum AVPixelFormat) frame->get()->format,
+                              _target_width, _target_height, _target_format,
+                              SWS_FAST_BILINEAR, NULL, NULL, NULL);
+        InfoL << "sws_getContext:" << av_get_pix_fmt_name((enum AVPixelFormat) frame->get()->format) << " -> "
+              << av_get_pix_fmt_name(_target_format);
+    }
+    if (_ctx) {
+        auto out = std::make_shared<FFmpegFrame>();
+        if (!out->get()->data[0]) {
+            out->fillPicture(_target_format, _target_width, _target_height);
+        }
+        int ret = 0;
+        if (0 == (ret = sws_scale(_ctx, frame->get()->data, frame->get()->linesize, 0, frame->get()->height,
+                                  out->get()->data, out->get()->linesize))) {
+            WarnL << "sws_scale failed:" << ffmpeg_err(ret);
+            return nullptr;
+        }
+
+        out->get()->format = _target_format;
+        out->get()->width = _target_width;
+        out->get()->height = _target_height;
+        out->get()->pkt_dts = frame->get()->pkt_dts;
+        out->get()->pts = frame->get()->pts;
+        return out;
+    }
+    return nullptr;
+}
+
+} //namespace mediakit
 #endif//ENABLE_FFMPEG
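The new FFmpegSws mirrors FFmpegSwr on the video side: the SwsContext is created lazily from the first input frame, and passing 0 for width/height makes the scaler inherit the source resolution. A minimal usage sketch, assuming a decoder callback as the frame source:

    // Convert decoded frames to packed BGR24 at the source resolution.
    auto sws = std::make_shared<mediakit::FFmpegSws>(AV_PIX_FMT_BGR24, 0, 0);
    decoder->setOnDecode([sws](const mediakit::FFmpegFrame::Ptr &frame) {
        auto bgr = sws->inputFrame(frame); // returns nullptr if sws_scale fails
        if (bgr) {
            // bgr->get()->data[0] / linesize[0] now describe a BGR24 picture
        }
    });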
@@ -8,23 +8,29 @@
  * may be found in the AUTHORS file in the root of the source tree.
  */
-#ifndef FFMpegDecoder_H_
-#define FFMpegDecoder_H_
+#ifndef ZLMEDIAKIT_TRANSCODE_H
+#define ZLMEDIAKIT_TRANSCODE_H
+
+#if defined(ENABLE_FFMPEG)
 #include "Util/TimeTicker.h"
 #include "Common/MediaSink.h"
-#if defined(ENABLE_FFMPEG)
 #ifdef __cplusplus
 extern "C" {
 #endif
+#include "libswscale/swscale.h"
+#include "libavutil/avutil.h"
+#include "libavutil/pixdesc.h"
 #include "libavcodec/avcodec.h"
 #include "libswresample/swresample.h"
+#include "libavutil/audio_fifo.h"
 #ifdef __cplusplus
 }
 #endif
+
+namespace mediakit {
+
 class FFmpegFrame {
 public:
     using Ptr = std::shared_ptr<FFmpegFrame>;
@@ -33,6 +39,7 @@ public:
     ~FFmpegFrame();
     AVFrame *get() const;
+    void fillPicture(AVPixelFormat target_format, int target_width, int target_height);
 private:
     char *_data = nullptr;
@@ -45,7 +52,6 @@ public:
     FFmpegSwr(AVSampleFormat output, int channel, int channel_layout, int samplerate);
     ~FFmpegSwr();
-
     FFmpegFrame::Ptr inputFrame(const FFmpegFrame::Ptr &frame);
 private:
@@ -59,19 +65,19 @@ private:
 class TaskManager {
 public:
     TaskManager() = default;
-    ~TaskManager();
+    virtual ~TaskManager();
+
+    void setMaxTaskSize(size_t size);
+    void stopThread(bool drop_task);
 protected:
     void startThread(const std::string &name);
-    void stopThread();
-
-    void addEncodeTask(std::function<void()> task);
-    void addDecodeTask(bool key_frame, std::function<void()> task);
+    bool addEncodeTask(std::function<void()> task);
+    bool addDecodeTask(bool key_frame, std::function<void()> task);
     bool isEnabled() const;
 private:
     void onThreadRun(const std::string &name);
-    void pushExit();
 private:
     class ThreadExitException : public std::runtime_error {
@@ -83,39 +89,55 @@ private:
 private:
     bool _decode_drop_start = false;
     bool _exit = false;
+    size_t _max_task = 30;
     std::mutex _task_mtx;
     toolkit::semaphore _sem;
     toolkit::List<std::function<void()> > _task;
     std::shared_ptr<std::thread> _thread;
 };
-class FFmpegDecoder : private TaskManager {
+class FFmpegDecoder : public TaskManager {
 public:
     using Ptr = std::shared_ptr<FFmpegDecoder>;
     using onDec = std::function<void(const FFmpegFrame::Ptr &)>;
-    FFmpegDecoder(const mediakit::Track::Ptr &track);
-    ~FFmpegDecoder();
-    bool inputFrame(const mediakit::Frame::Ptr &frame, bool may_async = true);
+    FFmpegDecoder(const Track::Ptr &track, int thread_num = 2);
+    ~FFmpegDecoder() override;
+
+    bool inputFrame(const Frame::Ptr &frame, bool live, bool async, bool enable_merge = true);
     void setOnDecode(onDec cb);
     void flush();
     const AVCodecContext *getContext() const;
 private:
     void onDecode(const FFmpegFrame::Ptr &frame);
-    bool inputFrame_l(const mediakit::Frame::Ptr &frame);
-    bool decodeFrame(const char *data, size_t size, uint32_t dts, uint32_t pts);
+    bool inputFrame_l(const Frame::Ptr &frame, bool live, bool enable_merge);
+    bool decodeFrame(const char *data, size_t size, uint32_t dts, uint32_t pts, bool live);
 private:
     bool _do_merger = false;
     toolkit::Ticker _ticker;
     onDec _cb;
     std::shared_ptr<AVCodecContext> _context;
-    mediakit::FrameMerger _merger { mediakit::FrameMerger::h264_prefix };
+    FrameMerger _merger{FrameMerger::h264_prefix};
 };
+class FFmpegSws {
+public:
+    using Ptr = std::shared_ptr<FFmpegSws>;
+
+    FFmpegSws(AVPixelFormat output, int width, int height);
+    ~FFmpegSws();
+    FFmpegFrame::Ptr inputFrame(const FFmpegFrame::Ptr &frame);
+    int inputFrame(const FFmpegFrame::Ptr &frame, uint8_t *data);
+
+private:
+    int _target_width;
+    int _target_height;
+    SwsContext *_ctx = nullptr;
+    AVPixelFormat _target_format;
+};
+
+} //namespace mediakit
-#endif// ENABLE_FFMPEG
-#endif /* FFMpegDecoder_H_ */
+#endif // ENABLE_FFMPEG
+#endif // ZLMEDIAKIT_TRANSCODE_H
@@ -28,22 +28,22 @@ using namespace std;
 namespace mediakit {
-bool DevChannel::inputYUV(char* apcYuv[3], int aiYuvLen[3], uint32_t uiStamp) {
+bool DevChannel::inputYUV(char *yuv[3], int linesize[3], uint32_t cts) {
 #ifdef ENABLE_X264
     //TimeTicker1(50);
     if (!_pH264Enc) {
         _pH264Enc.reset(new H264Encoder());
-        if (!_pH264Enc->init(_video->iWidth, _video->iHeight, _video->iFrameRate)) {
+        if (!_pH264Enc->init(_video->iWidth, _video->iHeight, _video->iFrameRate, _video->iBitRate)) {
             _pH264Enc.reset();
             WarnL << "H264Encoder init failed!";
         }
     }
     if (_pH264Enc) {
-        H264Encoder::H264Frame *pOut;
-        int iFrames = _pH264Enc->inputData(apcYuv, aiYuvLen, uiStamp, &pOut);
+        H264Encoder::H264Frame *out_frames;
+        int frames = _pH264Enc->inputData(yuv, linesize, cts, &out_frames);
         bool ret = false;
-        for (int i = 0; i < iFrames; i++) {
-            ret = inputH264((char *) pOut[i].pucData, pOut[i].iLength, uiStamp) ? true : ret;
+        for (int i = 0; i < frames; i++) {
+            ret = inputH264((char *) out_frames[i].pucData, out_frames[i].iLength, cts) ? true : ret;
         }
         return ret;
     }
......
@@ -29,6 +29,7 @@ public:
     int iWidth;
     int iHeight;
     float iFrameRate;
+    int iBitRate = 2 * 1024 * 1024;
 };
 class AudioInfo {
@@ -104,19 +105,19 @@ public:
     /**
      * Input a yuv420p video frame; it is encoded internally and inputH264 is called
-     * @param apcYuv
-     * @param aiYuvLen
-     * @param uiStamp
+     * @param yuv yuv420p plane pointers
+     * @param linesize yuv420p linesize values
+     * @param cts capture timestamp in milliseconds
      */
-    bool inputYUV(char *apcYuv[3], int aiYuvLen[3], uint32_t uiStamp);
+    bool inputYUV(char *yuv[3], int linesize[3], uint32_t cts);
     /**
      * Input pcm data; it is encoded internally and inputAAC is called
-     * @param pcData
-     * @param iDataLen
-     * @param uiStamp
+     * @param data pcm data pointer, int16 samples
+     * @param len pcm data length
+     * @param cts capture timestamp in milliseconds
      */
-    bool inputPCM(char *pcData, int iDataLen, uint32_t uiStamp);
+    bool inputPCM(char *data, int len, uint32_t cts);
 private:
     MediaOriginType getOriginType(MediaSource &sender) const override;
......
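A usage sketch for the renamed DevChannel entry points, assuming a yuv420p capture loop; the channel, plane pointers, and timestamps are hypothetical placeholders:

    // Push one captured yuv420p video frame (cts in milliseconds).
    char *yuv[3] = {y_plane, u_plane, v_plane};
    int linesize[3] = {width, width / 2, width / 2};
    channel->inputYUV(yuv, linesize, cts_ms);

    // Push int16 pcm audio the same way.
    channel->inputPCM(pcm_data, pcm_len, cts_ms);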
@@ -42,7 +42,7 @@ bool loadIniConfig(const char *ini_path){
 namespace Broadcast {
 const string kBroadcastMediaChanged = "kBroadcastMediaChanged";
 const string kBroadcastRecordMP4 = "kBroadcastRecordMP4";
-const string kBroadcastRecordTs = "kBroadcastRecoredTs";
+const string kBroadcastRecordTs = "kBroadcastRecordTs";
 const string kBroadcastHttpRequest = "kBroadcastHttpRequest";
 const string kBroadcastHttpAccess = "kBroadcastHttpAccess";
 const string kBroadcastOnGetRtspRealm = "kBroadcastOnGetRtspRealm";
@@ -77,6 +77,8 @@ const string kRtmpDemand = GENERAL_FIELD"rtmp_demand";
 const string kTSDemand = GENERAL_FIELD"ts_demand";
 const string kFMP4Demand = GENERAL_FIELD"fmp4_demand";
 const string kEnableAudio = GENERAL_FIELD"enable_audio";
+const string kCheckNvidiaDev = GENERAL_FIELD"check_nvidia_dev";
+const string kEnableFFmpegLog = GENERAL_FIELD"enable_ffmpeg_log";
 const string kWaitTrackReadyMS = GENERAL_FIELD"wait_track_ready_ms";
 const string kWaitAddTrackMS = GENERAL_FIELD"wait_add_track_ms";
 const string kUnreadyFrameCache = GENERAL_FIELD"unready_frame_cache";
@@ -100,6 +102,8 @@ static onceToken token([](){
     mINI::Instance()[kTSDemand] = 0;
     mINI::Instance()[kFMP4Demand] = 0;
     mINI::Instance()[kEnableAudio] = 1;
+    mINI::Instance()[kCheckNvidiaDev] = 1;
+    mINI::Instance()[kEnableFFmpegLog] = 0;
     mINI::Instance()[kWaitTrackReadyMS] = 10000;
     mINI::Instance()[kWaitAddTrackMS] = 3000;
     mINI::Instance()[kUnreadyFrameCache] = 100;
......
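Both switches live under the general config section (the GENERAL_FIELD prefix suggests ini keys general.check_nvidia_dev and general.enable_ffmpeg_log, with the defaults set above). At runtime they are read with the usual GET_CONFIG pattern, as the Transcode.cpp hunks in this commit do:

    // Pattern from Transcode.cpp above; defaults: check_nvidia_dev=1, enable_ffmpeg_log=0.
    GET_CONFIG(bool, check_nvidia_dev, General::kCheckNvidiaDev);   // probe the nvidia driver before using *_cuvid codecs
    GET_CONFIG(bool, enable_ffmpeg_log, General::kEnableFFmpegLog); // forward av_log output to the toolkit logger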
@@ -183,6 +183,10 @@ extern const std::string kTSDemand;
 extern const std::string kFMP4Demand;
 //whether protocol conversion globally enables or ignores audio
 extern const std::string kEnableAudio;
+//in a docker environment, the presence of the nvidia driver cannot be used to decide whether hardware transcoding is supported
+extern const std::string kCheckNvidiaDev;
+//whether to enable ffmpeg logging
+extern const std::string kEnableFFmpegLog;
 //wait at most 10 seconds for an uninitialized Track; after the timeout, uninitialized Tracks are ignored
 extern const std::string kWaitTrackReadyMS;
 //if a live stream has only a single Track, wait at most 3 seconds; if no data from another Track arrives before the timeout, treat the stream as single-Track
......