Commit a4621896 by baiyfcu Committed by GitHub

Merge pull request #17 from xiongziliang/master

update
parents 05a65d49 f881108d
ZLToolKit @ 5030af90
Subproject commit 4ede70fc435eb0a4d3a752b521170d86440b3935
Subproject commit 5030af90126ea8f01ded6744ae8abdf549d00a81
/*
* Copyright (c) 2016 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#ifndef ZLMEDIAKIT_ASSERT_H
#define ZLMEDIAKIT_ASSERT_H
#include <stdio.h>
#ifndef NDEBUG
#ifdef assert
#undef assert
#endif//assert

#ifdef __cplusplus
extern "C" {
#endif
//Called when an assertion fails (failed != 0); throws instead of aborting.
//Defined elsewhere in the project.
extern void Assert_Throw(int failed, const char *exp, const char *func, const char *file, int line);
#ifdef __cplusplus
}
#endif

//NOTE: no trailing semicolon — the standard requires assert(e) to expand to a
//void expression, so it must remain usable in contexts like
//`if (cond) assert(x); else ...` or `assert(a), assert(b);`.
//A semicolon baked into the macro breaks such uses.
#define assert(exp) Assert_Throw(!(exp), #exp, __FUNCTION__, __FILE__, __LINE__)
#else
//Release build: assert compiles away, matching the standard <assert.h> contract.
#define assert(e) ((void)0)
#endif//NDEBUG

#endif //ZLMEDIAKIT_ASSERT_H
media-server @ 576216c6
Subproject commit abc08f61bb1250b94d252cfeaea249527912dd3b
Subproject commit 576216c64bf3bcdc5e787da2adb3e169bdd97118
......@@ -39,6 +39,7 @@ set(MediaServer_Root ${CMAKE_CURRENT_SOURCE_DIR}/3rdpart/media-server)
#设置头文件目录
INCLUDE_DIRECTORIES(${ToolKit_Root})
INCLUDE_DIRECTORIES(${MediaKit_Root})
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/3rdpart)
set(ENABLE_HLS true)
set(ENABLE_OPENSSL true)
......@@ -57,6 +58,8 @@ if (OPENSSL_FOUND AND ENABLE_OPENSSL)
include_directories(${OPENSSL_INCLUDE_DIR})
add_definitions(-DENABLE_OPENSSL)
list(APPEND LINK_LIB_LIST ${OPENSSL_LIBRARIES})
else()
message(WARNING "openssl未找到,rtmp将不支持flash播放器,https/wss/rtsps/rtmps也将失效")
endif ()
#查找mysql是否安装
......@@ -104,9 +107,9 @@ if(ENABLE_HLS)
message(STATUS "ENABLE_HLS defined")
add_definitions(-DENABLE_HLS)
include_directories(${MediaServer_Root}/libmpeg/include)
aux_source_directory(${MediaServer_Root}/libmpeg/include src_mpeg)
aux_source_directory(${MediaServer_Root}/libmpeg/source src_mpeg)
include_directories(${MediaServer_Root}/libmpeg/include)
add_library(mpeg STATIC ${src_mpeg})
list(APPEND LINK_LIB_LIST mpeg)
......@@ -121,13 +124,14 @@ if(ENABLE_MP4)
message(STATUS "ENABLE_MP4 defined")
add_definitions(-DENABLE_MP4)
include_directories(${MediaServer_Root}/libmov/include)
include_directories(${MediaServer_Root}/libflv/include)
aux_source_directory(${MediaServer_Root}/libmov/include src_mov)
aux_source_directory(${MediaServer_Root}/libmov/source src_mov)
include_directories(${MediaServer_Root}/libmov/include)
aux_source_directory(${MediaServer_Root}/libflv/include src_flv)
aux_source_directory(${MediaServer_Root}/libflv/source src_flv)
include_directories(${MediaServer_Root}/libflv/include)
add_library(mov STATIC ${src_mov})
add_library(flv STATIC ${src_flv})
......@@ -141,10 +145,11 @@ endif()
#添加rtp库用于rtp转ps/ts
if(ENABLE_RTPPROXY AND ENABLE_HLS)
message(STATUS "ENABLE_RTPPROXY defined")
include_directories(${MediaServer_Root}/librtp/include)
aux_source_directory(${MediaServer_Root}/librtp/include src_rtp)
aux_source_directory(${MediaServer_Root}/librtp/source src_rtp)
aux_source_directory(${MediaServer_Root}/librtp/payload src_rtp)
include_directories(${MediaServer_Root}/librtp/include)
add_library(rtp STATIC ${src_rtp})
add_definitions(-DENABLE_RTPPROXY)
list(APPEND LINK_LIB_LIST rtp)
......
![logo](https://raw.githubusercontent.com/zlmediakit/ZLMediaKit/master/logo.png)
![logo](https://raw.githubusercontent.com/zlmediakit/ZLMediaKit/master/www/logo.png)
[english readme](https://github.com/xiongziliang/ZLMediaKit/blob/master/README_en.md)
......@@ -30,22 +30,21 @@
## 功能清单
- RTSP
- RTSP 服务器,支持RTMP/MP4转RTSP
- RTSPS 服务器,支持亚马逊echo show这样的设备
- RTSP 播放器,支持RTSP代理,支持生成静音音频
- RTSP 推流客户端与服务器
- RTSP[S]
- RTSP[S] 服务器,支持RTMP/MP4/HLS转RTSP[S],支持亚马逊echo show这样的设备
- RTSP[S] 播放器,支持RTSP代理,支持生成静音音频
- RTSP[S] 推流客户端与服务器
- 支持 `rtp over udp` `rtp over tcp` `rtp over http` `rtp组播` 四种RTP传输方式
- 服务器/客户端完整支持Basic/Digest方式的登录鉴权,全异步可配置化的鉴权接口
- 支持H265编码
- 服务器支持RTSP推流(包括`rtp over udp` `rtp over tcp`方式)
- 支持任意编码格式的rtsp推流,只是除H264/H265/AAC/G711外无法转协议
- RTMP
- RTMP 播放服务器,支持RTSP/MP4转RTMP
- RTMP 发布服务器,支持录制发布流
- RTMP 播放器,支持RTMP代理,支持生成静音音频
- RTMP 推流客户端
- RTMP[S]
- RTMP[S] 播放服务器,支持RTSP/MP4/HLS转RTMP
- RTMP[S] 发布服务器,支持录制发布流
- RTMP[S] 播放器,支持RTMP代理,支持生成静音音频
- RTMP[S] 推流客户端
- 支持http[s]-flv直播
- 支持websocket-flv直播
- 支持任意编码格式的rtmp推流,只是除H264/H265/AAC/G711外无法转协议
......@@ -55,6 +54,7 @@
- 支持HLS文件生成,自带HTTP文件服务器
- 通过cookie追踪技术,可以模拟HLS播放为长连接,实现丰富的业务逻辑
- 支持完备的HLS用户追踪、播放统计等业务功能,可以实现HLS按需拉流等业务
- 支持HLS播发器,支持拉流HLS转rtsp/rtmp/mp4
- HTTP[S]
- 服务器支持`目录索引生成`,`文件下载`,`表单提交请求`
......@@ -81,11 +81,15 @@
- 支持按需拉流,无人观看自动关断拉流
- 支持先拉流后推流,提高及时推流画面打开率
- 提供c api sdk
- 支持FFmpeg拉流代理任意格式的流
- 支持http api生成并返回实时截图
## 更新日志
- 2020/5/17 新增支持hls播发器,支持hls拉流代理
## 编译以及测试
请参考wiki:[快速开始](https://github.com/xiongziliang/ZLMediaKit/wiki/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B)
## 编译以及测试
**编译前务必仔细参考wiki:[快速开始](https://github.com/xiongziliang/ZLMediaKit/wiki/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B)操作!!!**
## 怎么使用
......@@ -114,8 +118,12 @@ bash build_docker_images.sh
- [IOS摄像头实时录制,生成rtsp/rtmp/hls/http-flv](https://gitee.com/xiahcu/IOSMedia)
- [IOS rtmp/rtsp播放器,视频推流器](https://gitee.com/xiahcu/IOSPlayer)
- [支持linux、windows、mac的rtmp/rtsp播放器](https://github.com/xiongziliang/ZLMediaPlayer)
- [配套的管理WEB网站](https://github.com/chenxiaolei/ZLMediaKit_NVR_UI)
- [基于ZLMediaKit分支的管理WEB网站](https://github.com/chenxiaolei/ZLMediaKit_NVR_UI)
- [基于ZLMediaKit主线的管理WEB网站](https://gitee.com/kkkkk5G/MediaServerUI)
- [DotNetCore的RESTful客户端](https://github.com/MingZhuLiu/ZLMediaKit.DotNetCore.Sdk)
- [GB28181-2016网络视频平台](https://github.com/swwheihei/wvp)
- [node-js版本的GB28181平台](https://gitee.com/hfwudao/GB28181_Node_Http)
## 授权协议
......
![logo](https://raw.githubusercontent.com/zlmediakit/ZLMediaKit/master/logo.png)
![logo](https://raw.githubusercontent.com/zlmediakit/ZLMediaKit/master/www/logo.png)
# A lightweight, high-performance and stable stream server and client framework based on C++11.
......@@ -15,18 +15,18 @@
## Features
- RTSP
- RTSP[S]
- RTSP[S] server,support rtsp push.
- RTSP player and pusher.
- RTSP[S] player and pusher.
- RTP Transport : `rtp over udp` `rtp over tcp` `rtp over http` `rtp udp multicast` .
- Basic/Digest/Url Authentication.
- H264/H265/AAC/G711 codec.
- Recorded as mp4.
- Vod of mp4.
- RTMP
- RTMP server,support player and pusher.
- RTMP player and pusher.
- RTMP[S]
- RTMP[S] server,support player and pusher.
- RTMP[S] player and pusher.
- Support HTTP-FLV player.
- H264/H265/AAC/G711 codec.
- Recorded as flv or mp4.
......@@ -36,6 +36,7 @@
- HLS
- RTSP and RTMP can be converted into HLS, with a built-in HTTP server.
- Play authentication based on cookie.
- Support HLS player, support streaming HLS proxy to RTSP / RTMP / MP4.
- HTTP[S]
- HTTP server, supporting directory menu and RESTful http api.
......@@ -53,6 +54,7 @@
- Play and push authentication.
- Pull stream on Demand.
- Support TS / PS streaming push through RTP,and it can be converted to RTSP / RTMP / HLS / FLV.
- Support real-time online screenshot http api.
- Protocol conversion:
......@@ -67,6 +69,7 @@
| RTMP --> MP4 | Y | Y | Y | N |
| MP4 --> RTSP[S] | Y | Y | Y | N |
| MP4 --> RTMP | Y | Y | Y | N |
| HLS --> RTSP/RTMP/MP4 | Y | Y | Y | N |
- Stream generation:
......@@ -106,7 +109,7 @@
| RTMP Pusher | Y |
| HTTP[S] | Y |
| WebSocket[S] | Y |
| HLS player | Y |
## System Requirements
......
......@@ -36,6 +36,22 @@ API_EXPORT mk_thread API_CALL mk_thread_from_tcp_session(mk_tcp_session ctx);
*/
API_EXPORT mk_thread API_CALL mk_thread_from_tcp_client(mk_tcp_client ctx);
/**
* 根据负载均衡算法,从事件线程池中随机获取一个事件线程
* 如果在事件线程内执行此函数将返回本事件线程
* 事件线程指的是定时器、网络io事件线程
* @return 事件线程
*/
API_EXPORT mk_thread API_CALL mk_thread_from_pool();
/**
* 根据负载均衡算法,从后台线程池中随机获取一个线程
* 后台线程本质与事件线程相同,只是优先级更低,同时可以执行短时间的阻塞任务
* ZLMediaKit中后台线程用于dns解析、mp4点播时的文件解复用
* @return 后台线程
*/
API_EXPORT mk_thread API_CALL mk_thread_from_pool_work();
///////////////////////////////////////////线程切换/////////////////////////////////////////////
typedef void (API_CALL *on_mk_async)(void *user_data);
......
......@@ -144,10 +144,12 @@ API_EXPORT uint16_t API_CALL mk_tcp_server_start(uint16_t port, mk_tcp_type type
s_tcp_server[type]->start<TcpSessionWithSSL<TcpSessionForC> >(port);
break;
case mk_type_ws:
s_tcp_server[type]->start<WebSocketSession<TcpSessionForC, HttpSession>>(port);
//此处你也可以修改WebSocketHeader::BINARY
s_tcp_server[type]->start<WebSocketSession<TcpSessionForC, HttpSession, WebSocketHeader::TEXT> >(port);
break;
case mk_type_wss:
s_tcp_server[type]->start<WebSocketSession<TcpSessionForC, HttpsSession>>(port);
//此处你也可以修改WebSocketHeader::BINARY
s_tcp_server[type]->start<WebSocketSession<TcpSessionForC, HttpsSession, WebSocketHeader::TEXT> >(port);
break;
default:
return 0;
......@@ -208,8 +210,10 @@ TcpClientForC::Ptr *mk_tcp_client_create_l(mk_tcp_client_events *events, mk_tcp_
case mk_type_ssl:
return (TcpClientForC::Ptr *)new shared_ptr<TcpSessionWithSSL<TcpClientForC> >(new TcpSessionWithSSL<TcpClientForC>(events));
case mk_type_ws:
//此处你也可以修改WebSocketHeader::BINARY
return (TcpClientForC::Ptr *)new shared_ptr<WebSocketClient<TcpClientForC, WebSocketHeader::TEXT, false> >(new WebSocketClient<TcpClientForC, WebSocketHeader::TEXT, false>(events));
case mk_type_wss:
//此处你也可以修改WebSocketHeader::BINARY
return (TcpClientForC::Ptr *)new shared_ptr<WebSocketClient<TcpClientForC, WebSocketHeader::TEXT, true> >(new WebSocketClient<TcpClientForC, WebSocketHeader::TEXT, true>(events));
default:
return nullptr;
......
......@@ -12,6 +12,7 @@
#include "mk_tcp_private.h"
#include "Util/logger.h"
#include "Poller/EventPoller.h"
#include "Thread/WorkThreadPool.h"
using namespace std;
using namespace toolkit;
......@@ -27,6 +28,14 @@ API_EXPORT mk_thread API_CALL mk_thread_from_tcp_client(mk_tcp_client ctx){
return (*client)->getPoller().get();
}
//Pick an event thread (timer / network io thread) from the global
//EventPollerPool using its load-balancing policy; when invoked from within
//an event thread, the pool hands back that same thread (see mk_thread.h).
API_EXPORT mk_thread API_CALL mk_thread_from_pool(){
return EventPollerPool::Instance().getPoller().get();
}
//Pick a background thread from the WorkThreadPool (load balanced).
//Background threads are lower priority than event threads and may run short
//blocking tasks, e.g. dns resolution and mp4 vod demuxing (see mk_thread.h).
API_EXPORT mk_thread API_CALL mk_thread_from_pool_work(){
return WorkThreadPool::Instance().getPoller().get();
}
API_EXPORT void API_CALL mk_async_do(mk_thread ctx,on_mk_async cb, void *user_data){
assert(ctx && cb);
EventPoller *poller = (EventPoller *)ctx;
......
......@@ -4,12 +4,18 @@ apiDebug=1
#一些比较敏感的http api在访问时需要提供secret,否则无权限调用
#如果是通过127.0.0.1访问,那么可以不提供secret
secret=035c73f7-bb6b-4889-a715-d9eb2d1925cc
#截图保存路径根目录,截图通过http api(/index/api/getSnap)生成和获取
snapRoot=./www/snap/
#默认截图图片,在启动FFmpeg截图后但是截图还未生成时,可以返回默认的预设图片
defaultSnap=./www/logo.png
[ffmpeg]
#FFmpeg可执行程序绝对路径
bin=/usr/local/bin/ffmpeg
#FFmpeg拉流再推流的命令模板,通过该模板可以设置再编码的一些参数
cmd=%s -re -i %s -c:a aac -strict -2 -ar 44100 -ab 48k -c:v libx264 -f flv %s
#FFmpeg生成截图的命令,可以通过修改该配置改变截图分辨率或质量
snap=%s -i %s -y -f mjpeg -t 0.001 %s
#FFmpeg日志的路径,如果置空则不生成FFmpeg日志
#可以为相对(相对于本可执行程序目录)或绝对路径
log=./ffmpeg/ffmpeg.log
......@@ -43,6 +49,11 @@ publishToMP4=0
#合并写缓存大小(单位毫秒),合并写指服务器缓存一定的数据后才会一次性写入socket,这样能提高性能,但是会提高延时
#开启后会同时关闭TCP_NODELAY并开启MSG_MORE
mergeWriteMS=0
#全局的时间戳覆盖开关,在转协议时,对frame进行时间戳覆盖
#该开关对rtsp/rtmp/rtp推流、rtsp/rtmp/hls拉流代理转协议时生效
#会直接影响rtsp/rtmp/hls/mp4/flv等协议的时间戳
#同协议情况下不影响(例如rtsp/rtmp推流,那么播放rtsp/rtmp时不会影响时间戳)
modifyStamp=0
[hls]
#hls写文件的buf大小,调整参数可以提高文件io性能
......@@ -76,8 +87,9 @@ on_publish=https://127.0.0.1/index/hook/on_publish
on_record_mp4=https://127.0.0.1/index/hook/on_record_mp4
#rtsp播放鉴权事件,此事件中比对rtsp的用户名密码
on_rtsp_auth=https://127.0.0.1/index/hook/on_rtsp_auth
#rtsp播放是否开启鉴权事件,置空则关闭rtsp鉴权。rtsp播放鉴权还支持url方式鉴权
#rtsp播放是否开启专属鉴权事件,置空则关闭rtsp鉴权。rtsp播放鉴权还支持url方式鉴权
#建议开发者统一采用url参数方式鉴权,rtsp用户名密码鉴权一般在设备上用的比较多
#开启rtsp专属鉴权后,将不再触发on_play鉴权事件
on_rtsp_realm=https://127.0.0.1/index/hook/on_rtsp_realm
#远程telnet调试鉴权事件
on_shell_login=https://127.0.0.1/index/hook/on_shell_login
......
......@@ -13,26 +13,27 @@
#include "Common/MediaSource.h"
#include "Util/File.h"
#include "System.h"
#include "Thread/WorkThreadPool.h"
namespace FFmpeg {
#define FFmpeg_FIELD "ffmpeg."
const string kBin = FFmpeg_FIELD"bin";
const string kCmd = FFmpeg_FIELD"cmd";
const string kLog = FFmpeg_FIELD"log";
const string kSnap = FFmpeg_FIELD"snap";
onceToken token([]() {
#ifdef _WIN32
string ffmpeg_bin = System::execute("where ffmpeg");
//windows下先关闭FFmpeg日志(目前不支持日志重定向)
mINI::Instance()[kCmd] = "%s -re -i \"%s\" -loglevel quiet -c:a aac -strict -2 -ar 44100 -ab 48k -c:v libx264 -f flv %s ";
string ffmpeg_bin = trim(System::execute("where ffmpeg"));
#else
string ffmpeg_bin = System::execute("which ffmpeg");
mINI::Instance()[kCmd] = "%s -re -i \"%s\" -c:a aac -strict -2 -ar 44100 -ab 48k -c:v libx264 -f flv %s ";
string ffmpeg_bin = trim(System::execute("which ffmpeg"));
#endif
//默认ffmpeg命令路径为环境变量中路径
mINI::Instance()[kBin] = ffmpeg_bin.empty() ? "ffmpeg" : ffmpeg_bin;
//ffmpeg日志保存路径
mINI::Instance()[kLog] = "./ffmpeg/ffmpeg.log";
mINI::Instance()[kCmd] = "%s -re -i %s -c:a aac -strict -2 -ar 44100 -ab 48k -c:v libx264 -f flv %s";
mINI::Instance()[kSnap] = "%s -i %s -y -f mjpeg -t 0.001 %s";
});
}
......@@ -114,8 +115,7 @@ void FFmpegSource::findAsync(int maxWaitMS, const function<void(const MediaSourc
auto src = MediaSource::find(_media_info._schema,
_media_info._vhost,
_media_info._app,
_media_info._streamid,
false);
_media_info._streamid);
if(src || !maxWaitMS){
cb(src);
return;
......@@ -196,7 +196,19 @@ void FFmpegSource::startTimer(int timeout_ms) {
//推流给其他服务器的,我们通过判断FFmpeg进程是否在线,如果FFmpeg推流中断,那么它应该会自动退出
if (!strongSelf->_process.wait(false)) {
//ffmpeg不在线,重新拉流
strongSelf->play(strongSelf->_src_url, strongSelf->_dst_url, timeout_ms, [](const SockException &) {});
strongSelf->play(strongSelf->_src_url, strongSelf->_dst_url, timeout_ms, [weakSelf](const SockException &ex) {
if(!ex){
//没有错误
return;
}
auto strongSelf = weakSelf.lock();
if (!strongSelf) {
//自身已经销毁
return;
}
//上次重试时间超过10秒,那么再重试FFmpeg拉流
strongSelf->startTimer(10 * 1000);
});
}
}
return true;
......@@ -232,3 +244,31 @@ void FFmpegSource::onGetMediaSource(const MediaSource::Ptr &src) {
_listener = src->getListener();
src->setListener(shared_from_this());
}
//Spawn an FFmpeg process to take a snapshot of play_url and save it to
//save_path; cb is invoked with true iff FFmpeg exited with code 0.
//A watchdog timer kills the process if it has not finished within timeout_sec.
void FFmpegSnap::makeSnap(const string &play_url, const string &save_path, float timeout_sec, const function<void(bool)> &cb) {
GET_CONFIG(string,ffmpeg_bin,FFmpeg::kBin);
GET_CONFIG(string,ffmpeg_snap,FFmpeg::kSnap);
GET_CONFIG(string,ffmpeg_log,FFmpeg::kLog);
std::shared_ptr<Process> process = std::make_shared<Process>();
//watchdog: fires after timeout_sec on an event-loop thread
auto delayTask = EventPollerPool::Instance().getPoller()->doDelayTask(timeout_sec * 1000,[process,cb](){
if(process->wait(false)){
//FFmpeg process still running when the timeout hit; kill it
process->kill(2000);
}
return 0;
});
//run on a background worker thread because process->wait(true) blocks
WorkThreadPool::Instance().getPoller()->async([process,play_url,save_path,delayTask,cb](){
char cmd[1024] = {0};
//ffmpeg_snap is a printf-style template: "<bin> -i <url> ... <save_path>"
snprintf(cmd, sizeof(cmd),ffmpeg_snap.data(),ffmpeg_bin.data(),play_url.data(),save_path.data());
process->run(cmd,ffmpeg_log.empty() ? "" : File::absolutePath("",ffmpeg_log));
//block until the FFmpeg process exits
process->wait(true);
//process has exited, so the watchdog timer is no longer needed
delayTask->cancel();
//report success (exit code 0) or failure to the caller
cb(process->exit_code() == 0);
});
}
......@@ -23,6 +23,23 @@ using namespace std;
using namespace toolkit;
using namespace mediakit;
namespace FFmpeg {
extern const string kSnap;
}
class FFmpegSnap {
public:
/// Take a snapshot of a stream by spawning an FFmpeg process.
/// \param play_url playback url; any format supported by FFmpeg works
/// \param save_path path where the jpeg snapshot file is saved
/// \param timeout_sec timeout for generating the snapshot (prevents blocking too long)
/// \param cb callback reporting whether the snapshot succeeded
static void makeSnap(const string &play_url, const string &save_path, float timeout_sec, const function<void(bool)> &cb);
private:
//static-only utility class: construction and destruction are disabled
FFmpegSnap() = delete;
~FFmpegSnap() = delete;
};
class FFmpegSource : public std::enable_shared_from_this<FFmpegSource> , public MediaSourceEvent{
public:
typedef shared_ptr<FFmpegSource> Ptr;
......
......@@ -10,13 +10,13 @@
#include <limits.h>
#include <sys/stat.h>
#ifndef _WIN32
#include <sys/resource.h>
#include <unistd.h>
#else
//#include <TlHelp32.h>
#include <windows.h>
#include <io.h>
#endif
#include <stdexcept>
......@@ -32,68 +32,83 @@ using namespace toolkit;
void Process::run(const string &cmd, const string &log_file_tmp) {
kill(2000);
#ifdef _WIN32
STARTUPINFO si;
PROCESS_INFORMATION pi;
ZeroMemory(&si, sizeof(si)); //结构体初始化;
ZeroMemory(&pi, sizeof(pi));
STARTUPINFO si = {0};
PROCESS_INFORMATION pi = {0};
string log_file;
if (log_file_tmp.empty()) {
//未指定子进程日志文件时,重定向至/dev/null
log_file = "NUL";
} else {
log_file = StrPrinter << log_file_tmp << "." << getCurrentMillisecond();
}
LPTSTR lpDir = const_cast<char*>(cmd.data());
//重定向shell日志至文件
auto fp = File::create_file(log_file.data(), "ab");
if (!fp) {
fprintf(stderr, "open log file %s failed:%d(%s)\r\n", log_file.data(), get_uv_error(), get_uv_errmsg());
} else {
auto log_fd = (HANDLE)(_get_osfhandle(fileno(fp)));
// dup to stdout and stderr.
si.wShowWindow = SW_HIDE;
// STARTF_USESHOWWINDOW:The wShowWindow member contains additional information.
// STARTF_USESTDHANDLES:The hStdInput, hStdOutput, and hStdError members contain additional information.
si.dwFlags = STARTF_USESHOWWINDOW | STARTF_USESTDHANDLES;
si.hStdError = log_fd;
si.hStdOutput = log_fd;
}
if (CreateProcess(NULL, lpDir, NULL, NULL, FALSE, 0, NULL, NULL, &si, &pi)){
LPTSTR lpDir = const_cast<char*>(cmd.data());
if (CreateProcess(NULL, lpDir, NULL, NULL, TRUE, 0, NULL, NULL, &si, &pi)){
//下面两行关闭句柄,解除本进程和新进程的关系,不然有可能 不小心调用TerminateProcess函数关掉子进程
CloseHandle(pi.hProcess);
CloseHandle(pi.hThread);
_pid = pi.dwProcessId;
InfoL << "start child proces " << _pid;
_handle = pi.hProcess;
fprintf(fp, "\r\n\r\n#### pid=%d,cmd=%s #####\r\n\r\n", _pid, cmd.data());
InfoL << "start child process " << _pid << ", log file:" << log_file;
} else {
WarnL << "start child proces fail: " << GetLastError();
WarnL << "start child process fail: " << get_uv_errmsg();
}
fclose(fp);
#else
_pid = fork();
if (_pid < 0) {
throw std::runtime_error(StrPrinter << "fork child process falied,err:" << get_uv_errmsg());
throw std::runtime_error(StrPrinter << "fork child process failed,err:" << get_uv_errmsg());
}
if (_pid == 0) {
string log_file;
if (log_file_tmp.empty()) {
//未指定子进程日志文件时,重定向至/dev/null
log_file = "/dev/null";
} else {
log_file = StrPrinter << log_file_tmp << "." << getpid();
}
//子进程关闭core文件生成
struct rlimit rlim = { 0,0 };
struct rlimit rlim = {0, 0};
setrlimit(RLIMIT_CORE, &rlim);
//在启动子进程时,暂时禁用SIGINT、SIGTERM信号
// ignore the SIGINT and SIGTERM
signal(SIGINT, SIG_IGN);
signal(SIGTERM, SIG_IGN);
string log_file;
if (log_file_tmp.empty()) {
log_file = "/dev/null";
}
else {
log_file = StrPrinter << log_file_tmp << "." << getpid();
}
int log_fd = -1;
int flags = O_CREAT | O_WRONLY | O_APPEND;
mode_t mode = S_IRWXO | S_IRWXG | S_IRWXU;// S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH;
File::create_path(log_file.data(), mode);
if ((log_fd = ::open(log_file.c_str(), flags, mode)) < 0) {
fprintf(stderr, "open log file %s failed:%d(%s)\r\n", log_file.data(), errno, strerror(errno));
}
else {
//重定向shell日志至文件
auto fp = File::create_file(log_file.data(), "ab");
if (!fp) {
fprintf(stderr, "open log file %s failed:%d(%s)\r\n", log_file.data(), get_uv_error(), get_uv_errmsg());
} else {
auto log_fd = fileno(fp);
// dup to stdout and stderr.
if (dup2(log_fd, STDOUT_FILENO) < 0) {
fprintf(stderr, "dup2 stdout file %s failed:%d(%s)\r\n", log_file.data(), errno, strerror(errno));
fprintf(stderr, "dup2 stdout file %s failed:%d(%s)\r\n", log_file.data(), get_uv_error(), get_uv_errmsg());
}
if (dup2(log_fd, STDERR_FILENO) < 0) {
fprintf(stderr, "dup2 stderr file %s failed:%d(%s)\r\n", log_file.data(), errno, strerror(errno));
fprintf(stderr, "dup2 stderr file %s failed:%d(%s)\r\n", log_file.data(), get_uv_error(), get_uv_errmsg());
}
// close log fd
::close(log_fd);
// 关闭日志文件
::fclose(fp);
}
fprintf(stderr, "\r\n\r\n#### pid=%d,cmd=%s #####\r\n\r\n", getpid(), cmd.data());
// close other fds
// TODO: do in right way.
//关闭父进程继承的fd
for (int i = 3; i < 1024; i++) {
::close(i);
}
......@@ -101,9 +116,9 @@ void Process::run(const string &cmd, const string &log_file_tmp) {
auto params = split(cmd, " ");
// memory leak in child process, it's ok.
char **charpv_params = new char *[params.size() + 1];
for (int i = 0; i < (int)params.size(); i++) {
for (int i = 0; i < (int) params.size(); i++) {
std::string &p = params[i];
charpv_params[i] = (char *)p.data();
charpv_params[i] = (char *) p.data();
}
// EOF: NULL
charpv_params[params.size()] = NULL;
......@@ -111,11 +126,19 @@ void Process::run(const string &cmd, const string &log_file_tmp) {
// TODO: execv or execvp
auto ret = execv(params[0].c_str(), charpv_params);
if (ret < 0) {
fprintf(stderr, "fork process failed, errno=%d(%s)\r\n", errno, strerror(errno));
fprintf(stderr, "fork process failed:%d(%s)\r\n", get_uv_error(), get_uv_errmsg());
}
exit(ret);
}
InfoL << "start child proces " << _pid;
string log_file;
if (log_file_tmp.empty()) {
//未指定子进程日志文件时,重定向至/dev/null
log_file = "/dev/null";
} else {
log_file = StrPrinter << log_file_tmp << "." << _pid;
}
InfoL << "start child process " << _pid << ", log file:" << log_file;
#endif // _WIN32
}
......@@ -126,24 +149,41 @@ void Process::run(const string &cmd, const string &log_file_tmp) {
* @param block 是否阻塞等待
* @return 进程是否还在运行
*/
static bool s_wait(pid_t pid,int *exit_code_ptr,bool block) {
static bool s_wait(pid_t pid, void *handle, int *exit_code_ptr, bool block) {
if (pid <= 0) {
return false;
}
int status = 0;
#ifdef _WIN32
HANDLE hProcess = NULL;
hProcess = OpenProcess(PROCESS_TERMINATE, FALSE, pid); //打开目标进程
if (hProcess == NULL) {
DWORD code = 0;
if (block) {
//一直等待
code = WaitForSingleObject(handle, INFINITE);
} else {
code = WaitForSingleObject(handle, 0);
}
if(code == WAIT_FAILED || code == WAIT_OBJECT_0){
//子进程已经退出了,获取子进程退出代码
DWORD exitCode = 0;
if(exit_code_ptr && GetExitCodeProcess(handle, &exitCode)){
*exit_code_ptr = exitCode;
}
return false;
}
CloseHandle(hProcess);
if(code == WAIT_TIMEOUT){
//子进程还在线
return true;
}
//不太可能运行到此处
WarnL << "WaitForSingleObject ret:" << code;
return false;
#else
int status = 0;
pid_t p = waitpid(pid, &status, block ? 0 : WNOHANG);
int exit_code = (status & 0xFF00) >> 8;
if (exit_code_ptr) {
*exit_code_ptr = (status & 0xFF00) >> 8;
*exit_code_ptr = exit_code;
}
if (p < 0) {
WarnL << "waitpid failed, pid=" << pid << ", err=" << get_uv_errmsg();
......@@ -153,26 +193,57 @@ static bool s_wait(pid_t pid,int *exit_code_ptr,bool block) {
InfoL << "process terminated, pid=" << pid << ", exit code=" << exit_code;
return false;
}
return true;
#endif // _WIN32
}
return true;
#ifdef _WIN32
// Inspired from http://stackoverflow.com/a/15281070/1529139
// and http://stackoverflow.com/q/40059902/1529139
//Send a console control event (e.g. CTRL_C_EVENT) to another process by pid.
//Works by temporarily detaching from our own console, attaching to the
//target's console, and generating the event there. Returns true on success.
bool signalCtrl(DWORD dwProcessId, DWORD dwCtrlEvent){
bool success = false;
DWORD thisConsoleId = GetCurrentProcessId();
// Leave current console if it exists
// (otherwise AttachConsole will return ERROR_ACCESS_DENIED)
bool consoleDetached = (FreeConsole() != FALSE);
if (AttachConsole(dwProcessId) != FALSE){
// Install a no-op Ctrl handler so the generated event does not also
// kill this process.
// WARNING: do not remove this handler here, or the current program
// would be killed as well.
SetConsoleCtrlHandler(nullptr, true);
success = (GenerateConsoleCtrlEvent(dwCtrlEvent, 0) != FALSE);
FreeConsole();
}
if (consoleDetached){
// Re-attach to our original console; create a new one if the previous
// console was deleted by the OS in the meantime.
if (AttachConsole(thisConsoleId) == FALSE){
int errorCode = GetLastError();
if (errorCode == 31){
// 31=ERROR_GEN_FAILURE
AllocConsole();
}
}
}
return success;
}
#endif // _WIN32
static void s_kill(pid_t pid,int max_delay,bool force){
static void s_kill(pid_t pid, void *handle, int max_delay, bool force) {
if (pid <= 0) {
//pid无效
return;
}
#ifdef _WIN32
HANDLE hProcess = NULL;
hProcess = OpenProcess(PROCESS_TERMINATE, FALSE, pid); //打开目标进程
if (hProcess == NULL) {
WarnL << "\nOpen Process fAiled: " << GetLastError();
return;
}
DWORD ret = TerminateProcess(hProcess, 0); //结束目标进程
if (ret == 0) {
WarnL << GetLastError;
//windows下目前没有比较好的手段往子进程发送SIGTERM或信号
//所以杀死子进程的方式全部强制为立即关闭
force = true;
if(force){
//强制关闭子进程
TerminateProcess(handle, 0);
}else{
//非强制关闭,发送Ctr+C信号
signalCtrl(pid, CTRL_C_EVENT);
}
#else
if (::kill(pid, force ? SIGKILL : SIGTERM) == -1) {
......@@ -182,33 +253,38 @@ static void s_kill(pid_t pid,int max_delay,bool force){
}
#endif // _WIN32
if(force){
if (force) {
//发送SIGKILL信号后,阻塞等待退出
s_wait(pid, NULL, true);
s_wait(pid, handle, nullptr, true);
DebugL << "force kill " << pid << " success!";
return;
}
//发送SIGTERM信号后,2秒后检查子进程是否已经退出
WorkThreadPool::Instance().getPoller()->doDelayTask(max_delay,[pid](){
if (!s_wait(pid, nullptr, false)) {
WorkThreadPool::Instance().getPoller()->doDelayTask(max_delay, [pid, handle]() {
if (!s_wait(pid, handle, nullptr, false)) {
//进程已经退出了
return 0;
}
//进程还在运行
WarnL << "process still working,force kill it:" << pid;
s_kill(pid,0, true);
s_kill(pid, handle, 0, true);
return 0;
});
}
void Process::kill(int max_delay,bool force) {
//Terminate the child process. When force is false a graceful stop is
//attempted first, escalating to a forced kill after max_delay ms (see s_kill).
//Also releases the Windows process handle and marks the pid as invalid.
void Process::kill(int max_delay, bool force) {
if (_pid <= 0) {
//no child process to kill
return;
}
s_kill(_pid, _handle, max_delay, force);
_pid = -1;
#ifdef _WIN32
if(_handle){
//release the process handle so it cannot be reused after the kill
CloseHandle(_handle);
_handle = nullptr;
}
#endif
}
Process::~Process() {
......@@ -218,7 +294,7 @@ Process::~Process() {
Process::Process() {}
//Check on (block=false) or wait for (block=true) the child process.
//Stores the child's exit code in _exit_code once it has terminated.
//Returns true while the process is still running, false once it has exited.
bool Process::wait(bool block) {
return s_wait(_pid, _handle, &_exit_code, block);
}
int Process::exit_code() {
......
......@@ -31,6 +31,7 @@ public:
int exit_code();
private:
pid_t _pid = -1;
void *_handle = nullptr;
int _exit_code = 0;
};
......
......@@ -52,7 +52,7 @@ string System::execute(const string &cmd) {
#if !defined(ANDROID) && !defined(_WIN32)
static string addr2line(const string &address) {
string cmd = StrPrinter << "addr2line -e " << exePath() << " " << address;
string cmd = StrPrinter << "addr2line -C -f -e " << exePath() << " " << address;
return System::execute(cmd);
}
......
......@@ -8,11 +8,12 @@
* may be found in the AUTHORS file in the root of the source tree.
*/
#include <sys/stat.h>
#include <math.h>
#include <signal.h>
#include <functional>
#include <sstream>
#include <unordered_map>
#include <math.h>
#include "jsoncpp/json.h"
#include "Util/util.h"
#include "Util/logger.h"
......@@ -50,10 +51,14 @@ typedef enum {
#define API_FIELD "api."
const string kApiDebug = API_FIELD"apiDebug";
const string kSecret = API_FIELD"secret";
const string kSnapRoot = API_FIELD"snapRoot";
const string kDefaultSnap = API_FIELD"defaultSnap";
static onceToken token([]() {
mINI::Instance()[kApiDebug] = "1";
mINI::Instance()[kSecret] = "035c73f7-bb6b-4889-a715-d9eb2d1925cc";
mINI::Instance()[kSnapRoot] = "./www/snap/";
mINI::Instance()[kDefaultSnap] = "./www/logo.png";
});
}//namespace API
......@@ -145,7 +150,6 @@ static inline void addHttpListener(){
NoticeCenter::Instance().addListener(nullptr, Broadcast::kBroadcastHttpRequest, [](BroadcastHttpRequestArgs) {
auto it = s_map_api.find(parser.Url());
if (it == s_map_api.end()) {
consumed = false;
return;
}
//该api已被消费
......@@ -174,7 +178,7 @@ static inline void addHttpListener(){
size = body->remainSize();
}
if(size < 4 * 1024){
if(size && size < 4 * 1024){
string contentOut = body->readData(size)->toString();
DebugL << "\r\n# request:\r\n" << parser.Method() << " " << parser.FullUrl() << "\r\n"
<< "# content:\r\n" << parser.Content() << "\r\n"
......@@ -436,14 +440,14 @@ void installWebApi() {
api_regist1("/index/api/isMediaOnline",[](API_ARGS1){
CHECK_SECRET();
CHECK_ARGS("schema","vhost","app","stream");
val["online"] = (bool) (MediaSource::find(allArgs["schema"],allArgs["vhost"],allArgs["app"],allArgs["stream"],false));
val["online"] = (bool) (MediaSource::find(allArgs["schema"],allArgs["vhost"],allArgs["app"],allArgs["stream"]));
});
//测试url http://127.0.0.1/index/api/getMediaInfo?schema=rtsp&vhost=__defaultVhost__&app=live&stream=obs
api_regist1("/index/api/getMediaInfo",[](API_ARGS1){
CHECK_SECRET();
CHECK_ARGS("schema","vhost","app","stream");
auto src = MediaSource::find(allArgs["schema"],allArgs["vhost"],allArgs["app"],allArgs["stream"],false);
auto src = MediaSource::find(allArgs["schema"],allArgs["vhost"],allArgs["app"],allArgs["stream"]);
if(!src){
val["online"] = false;
return;
......@@ -817,6 +821,78 @@ void installWebApi() {
val["data"]["paths"] = paths;
});
static auto responseSnap = [](const string &snap_path,
const HttpSession::KeyValue &headerIn,
const HttpSession::HttpResponseInvoker &invoker) {
StrCaseMap headerOut;
struct stat statbuf = {0};
GET_CONFIG(string, defaultSnap, API::kDefaultSnap);
if (!(stat(snap_path.data(), &statbuf) == 0 && statbuf.st_size != 0) && !defaultSnap.empty()) {
//空文件且设置了预设图,则返回预设图片(也就是FFmpeg生成截图中空档期的默认图片)
const_cast<string&>(snap_path) = File::absolutePath(defaultSnap, "");
headerOut["Content-Type"] = HttpFileManager::getContentType(snap_path.data());
} else {
//之前生成的截图文件,我们默认为jpeg格式
headerOut["Content-Type"] = HttpFileManager::getContentType(".jpeg");
}
//返回图片给http客户端
invoker.responseFile(headerIn, headerOut, snap_path);
};
//获取截图缓存或者实时截图
//http://127.0.0.1/index/api/getSnap?url=rtmp://127.0.0.1/record/robot.mp4&timeout_sec=10&expire_sec=3
api_regist2("/index/api/getSnap", [](API_ARGS2){
CHECK_SECRET();
CHECK_ARGS("url", "timeout_sec", "expire_sec");
GET_CONFIG(string, snap_root, API::kSnapRoot);
int expire_sec = allArgs["expire_sec"];
auto scan_path = File::absolutePath(MD5(allArgs["url"]).hexdigest(), snap_root) + "/";
string snap_path;
File::scanDir(scan_path, [&](const string &path, bool isDir) {
if (isDir) {
//忽略文件夹
return true;
}
//找到截图
auto tm = FindField(path.data() + scan_path.size(), nullptr, ".jpeg");
if (atoll(tm.data()) + expire_sec < time(NULL)) {
//截图已经过期,删除之,后面重新生成
File::delete_file(path.data());
return true;
}
//截图未过期,中断遍历,返回上次生成的截图
snap_path = path;
return false;
});
if(!snap_path.empty()){
responseSnap(snap_path, headerIn, invoker);
return;
}
//无截图或者截图已经过期
snap_path = StrPrinter << scan_path << time(NULL) << ".jpeg";
//生成一个空文件,目的是顺便创建文件夹路径,
//同时防止在FFmpeg生成截图途中不停的尝试调用该api启动FFmpeg生成相同的截图
auto file = File::create_file(snap_path.data(), "wb");
if (file) {
fclose(file);
}
//启动FFmpeg进程,开始截图
FFmpegSnap::makeSnap(allArgs["url"],snap_path,allArgs["timeout_sec"],[invoker,headerIn,snap_path](bool success){
if(!success){
//生成截图失败,可能残留空文件
File::delete_file(snap_path.data());
}
responseSnap(snap_path, headerIn, invoker);
});
});
////////////以下是注册的Hook API////////////
api_regist1("/index/hook/on_publish",[](API_ARGS1){
//开始推流事件
......
......@@ -78,14 +78,6 @@ void DevChannel::inputH264(const char *data, int len, uint32_t dts, uint32_t pts
if(pts == 0){
pts = dts;
}
int prefixeSize;
if (memcmp("\x00\x00\x00\x01", data, 4) == 0) {
prefixeSize = 4;
} else if (memcmp("\x00\x00\x01", data, 3) == 0) {
prefixeSize = 3;
} else {
prefixeSize = 0;
}
//由于rtmp/hls/mp4需要缓存时间戳相同的帧,
//所以使用FrameNoCacheAble类型的帧反而会在转换成FrameCacheAble时多次内存拷贝
......@@ -93,9 +85,8 @@ void DevChannel::inputH264(const char *data, int len, uint32_t dts, uint32_t pts
H264Frame::Ptr frame = std::make_shared<H264Frame>();
frame->_dts = dts;
frame->_pts = pts;
frame->_buffer.assign("\x00\x00\x00\x01",4);
frame->_buffer.append(data + prefixeSize, len - prefixeSize);
frame->_prefix_size = 4;
frame->_buffer.assign(data, len);
frame->_prefix_size = prefixSize(data,len);
inputFrame(frame);
}
......@@ -106,14 +97,6 @@ void DevChannel::inputH265(const char *data, int len, uint32_t dts, uint32_t pts
if(pts == 0){
pts = dts;
}
int prefixeSize;
if (memcmp("\x00\x00\x00\x01", data, 4) == 0) {
prefixeSize = 4;
} else if (memcmp("\x00\x00\x01", data, 3) == 0) {
prefixeSize = 3;
} else {
prefixeSize = 0;
}
//由于rtmp/hls/mp4需要缓存时间戳相同的帧,
//所以使用FrameNoCacheAble类型的帧反而会在转换成FrameCacheAble时多次内存拷贝
......@@ -121,9 +104,8 @@ void DevChannel::inputH265(const char *data, int len, uint32_t dts, uint32_t pts
H265Frame::Ptr frame = std::make_shared<H265Frame>();
frame->_dts = dts;
frame->_pts = pts;
frame->_buffer.assign("\x00\x00\x00\x01",4);
frame->_buffer.append(data + prefixeSize, len - prefixeSize);
frame->_prefix_size = 4;
frame->_buffer.assign(data, len);
frame->_prefix_size = prefixSize(data,len);
inputFrame(frame);
}
......@@ -163,7 +145,9 @@ void DevChannel::inputG711(const char *data, int len, uint32_t dts){
if (dts == 0) {
dts = (uint32_t)_aTicker[1].elapsedTime();
}
inputFrame(std::make_shared<G711FrameNoCacheAble>(_audio->codecId, (char*)data, len, dts, 0));
auto frame = std::make_shared<G711FrameNoCacheAble>((char*)data, len, dts, 0);
frame->setCodec(_audio->codecId);
inputFrame(frame);
}
void DevChannel::initVideo(const VideoInfo &info) {
......
......@@ -180,9 +180,8 @@ static void eraseIfEmpty(MAP &map, IT0 it0, IT1 it1, IT2 it2) {
}
};
void findAsync_l(const MediaInfo &info, const std::shared_ptr<TcpSession> &session, bool retry,
const function<void(const MediaSource::Ptr &src)> &cb){
auto src = MediaSource::find(info._schema, info._vhost, info._app, info._streamid, true);
void MediaSource::findAsync_l(const MediaInfo &info, const std::shared_ptr<TcpSession> &session, bool retry, const function<void(const MediaSource::Ptr &src)> &cb){
auto src = MediaSource::find_l(info._schema, info._vhost, info._app, info._streamid, true);
if(src || !retry){
cb(src);
return;
......@@ -248,7 +247,11 @@ void MediaSource::findAsync(const MediaInfo &info, const std::shared_ptr<TcpSess
return findAsync_l(info, session, true, cb);
}
MediaSource::Ptr MediaSource::find(const string &schema, const string &vhost_tmp, const string &app, const string &id, bool bMake) {
MediaSource::Ptr MediaSource::find(const string &schema, const string &vhost, const string &app, const string &id) {
return find_l(schema, vhost, app, id, false);
}
MediaSource::Ptr MediaSource::find_l(const string &schema, const string &vhost_tmp, const string &app, const string &id, bool bMake) {
string vhost = vhost_tmp;
if(vhost.empty()){
vhost = DEFAULT_VHOST;
......@@ -419,12 +422,10 @@ void MediaSourceEvent::onNoneReader(MediaSource &sender){
//如果mp4点播, 无人观看时我们强制关闭点播
bool is_mp4_vod = sender.getApp() == recordApp;
//无人观看mp4点播时,3秒后自动关闭
auto close_delay = is_mp4_vod ? 3.0 : stream_none_reader_delay / 1000.0;
//没有任何人观看该视频源,表明该源可以关闭了
weak_ptr<MediaSource> weakSender = sender.shared_from_this();
_async_close_timer = std::make_shared<Timer>(close_delay, [weakSender,is_mp4_vod]() {
_async_close_timer = std::make_shared<Timer>(stream_none_reader_delay / 1000.0, [weakSender,is_mp4_vod]() {
auto strongSender = weakSender.lock();
if (!strongSender) {
//对象已经销毁
......@@ -467,7 +468,7 @@ MediaSource::Ptr MediaSource::createFromMP4(const string &schema, const string &
try {
MP4Reader::Ptr pReader(new MP4Reader(vhost, app, stream, filePath));
pReader->startReadMP4();
return MediaSource::find(schema, vhost, app, stream, false);
return MediaSource::find(schema, vhost, app, stream);
} catch (std::exception &ex) {
WarnL << ex.what();
return nullptr;
......@@ -478,57 +479,51 @@ MediaSource::Ptr MediaSource::createFromMP4(const string &schema, const string &
#endif //ENABLE_MP4
}
static bool isFlushAble_default(bool is_audio, uint32_t last_stamp, uint32_t new_stamp, int cache_size) {
if (new_stamp < last_stamp) {
//时间戳回退(可能seek中)
static bool isFlushAble_default(bool is_video, uint32_t last_stamp, uint32_t new_stamp, int cache_size) {
if (new_stamp + 500 < last_stamp) {
//时间戳回退比较大(可能seek中),由于rtp中时间戳是pts,是可能存在一定程度的回退的
return true;
}
if (!is_audio) {
//这是视频,时间戳发送变化或者缓存超过1024个
//时间戳发送变化或者缓存超过1024个,sendmsg接口一般最多只能发送1024个数据包
return last_stamp != new_stamp || cache_size >= 1024;
}
//这是音频,缓存超过100ms或者缓存个数超过10个
return new_stamp > last_stamp + 100 || cache_size > 10;
}
static bool isFlushAble_merge(bool is_audio, uint32_t last_stamp, uint32_t new_stamp, int cache_size, int merge_ms) {
if (new_stamp < last_stamp) {
//时间戳回退(可能seek中)
static bool isFlushAble_merge(bool is_video, uint32_t last_stamp, uint32_t new_stamp, int cache_size, int merge_ms) {
if (new_stamp + 500 < last_stamp) {
//时间戳回退比较大(可能seek中),由于rtp中时间戳是pts,是可能存在一定程度的回退的
return true;
}
if(new_stamp > last_stamp + merge_ms){
if (new_stamp > last_stamp + merge_ms) {
//时间戳增量超过合并写阈值
return true;
}
if (!is_audio) {
//这是视频,缓存数超过1024个,这个逻辑用于避免时间戳异常的流导致的内存暴增问题
//缓存数超过1024个,这个逻辑用于避免时间戳异常的流导致的内存暴增问题
//而且sendmsg接口一般最多只能发送1024个数据包
return cache_size >= 1024;
}
//这是音频,音频缓存超过20个
return cache_size > 20;
}
bool FlushPolicy::isFlushAble(uint32_t new_stamp, int cache_size) {
bool ret = false;
bool FlushPolicy::isFlushAble(bool is_video, bool is_key, uint32_t new_stamp, int cache_size) {
bool flush_flag = false;
if (is_key && is_video) {
//遇到关键帧flush掉前面的数据,确保关键帧为该组数据的第一帧,确保GOP缓存有效
flush_flag = true;
} else {
GET_CONFIG(int, mergeWriteMS, General::kMergeWriteMS);
if (mergeWriteMS <= 0) {
//关闭了合并写或者合并写阈值小于等于0
ret = isFlushAble_default(_is_audio, _last_stamp, new_stamp, cache_size);
flush_flag = isFlushAble_default(is_video, _last_stamp[is_video], new_stamp, cache_size);
} else {
ret = isFlushAble_merge(_is_audio, _last_stamp, new_stamp, cache_size, mergeWriteMS);
flush_flag = isFlushAble_merge(is_video, _last_stamp[is_video], new_stamp, cache_size, mergeWriteMS);
}
}
if (ret) {
// DebugL << _is_audio << " " << _last_stamp << " " << new_stamp;
_last_stamp = new_stamp;
if (flush_flag) {
_last_stamp[is_video] = new_stamp;
}
return ret;
return flush_flag;
}
} /* namespace mediakit */
\ No newline at end of file
......@@ -134,7 +134,7 @@ public:
virtual bool isRecording(Recorder::type type);
// 同步查找流
static Ptr find(const string &schema, const string &vhost, const string &app, const string &id, bool bMake = true) ;
static Ptr find(const string &schema, const string &vhost, const string &app, const string &id);
// 异步查找流
static void findAsync(const MediaInfo &info, const std::shared_ptr<TcpSession> &session, const function<void(const Ptr &src)> &cb);
// 遍历所有流
......@@ -142,9 +142,14 @@ public:
// 从mp4文件生成MediaSource
static MediaSource::Ptr createFromMP4(const string &schema, const string &vhost, const string &app, const string &stream, const string &filePath = "", bool checkApp = true);
protected:
void regist() ;
bool unregist() ;
bool unregist();
private:
static Ptr find_l(const string &schema, const string &vhost, const string &app, const string &id, bool bMake);
static void findAsync_l(const MediaInfo &info, const std::shared_ptr<TcpSession> &session, bool retry, const function<void(const MediaSource::Ptr &src)> &cb);
private:
string _strSchema;
string _strVhost;
......@@ -159,10 +164,7 @@ private:
///缓存刷新策略类
class FlushPolicy {
public:
FlushPolicy(bool is_audio) {
_is_audio = is_audio;
};
FlushPolicy() = default;
~FlushPolicy() = default;
uint32_t getStamp(const RtpPacket::Ptr &packet) {
......@@ -173,45 +175,45 @@ public:
return packet->timeStamp;
}
bool isFlushAble(uint32_t new_stamp, int cache_size);
bool isFlushAble(bool is_video, bool is_key, uint32_t new_stamp, int cache_size);
private:
bool _is_audio;
uint32_t _last_stamp= 0;
uint32_t _last_stamp[2] = {0, 0};
};
/// 视频合并写缓存模板
/// 合并写缓存模板
/// \tparam packet 包类型
/// \tparam policy 刷新缓存策略
/// \tparam packet_list 包缓存类型
template<typename packet, typename policy = FlushPolicy, typename packet_list = List<std::shared_ptr<packet> > >
class VideoPacketCache {
class PacketCache {
public:
VideoPacketCache() : _policy(false) {
PacketCache(){
_cache = std::make_shared<packet_list>();
}
virtual ~VideoPacketCache() = default;
virtual ~PacketCache() = default;
void inputVideo(const std::shared_ptr<packet> &rtp, bool key_pos) {
if (_policy.isFlushAble(_policy.getStamp(rtp), _cache->size())) {
void inputPacket(bool is_video, const std::shared_ptr<packet> &pkt, bool key_pos) {
if (_policy.isFlushAble(is_video, key_pos, _policy.getStamp(pkt), _cache->size())) {
flushAll();
}
//追加数据到最后
_cache->emplace_back(rtp);
_cache->emplace_back(pkt);
if (key_pos) {
_key_pos = key_pos;
}
}
virtual void onFlushVideo(std::shared_ptr<packet_list> &, bool key_pos) = 0;
virtual void onFlush(std::shared_ptr<packet_list> &, bool key_pos) = 0;
private:
void flushAll() {
if (_cache->empty()) {
return;
}
onFlushVideo(_cache, _key_pos);
onFlush(_cache, _key_pos);
_cache = std::make_shared<packet_list>();
_key_pos = false;
}
......@@ -222,44 +224,5 @@ private:
bool _key_pos = false;
};
/// 音频频合并写缓存模板
/// \tparam packet 包类型
/// \tparam policy 刷新缓存策略
/// \tparam packet_list 包缓存类型
template<typename packet, typename policy = FlushPolicy, typename packet_list = List<std::shared_ptr<packet> > >
class AudioPacketCache {
public:
AudioPacketCache() : _policy(true) {
_cache = std::make_shared<packet_list>();
}
virtual ~AudioPacketCache() = default;
void inputAudio(const std::shared_ptr<packet> &rtp) {
if (_policy.isFlushAble(_policy.getStamp(rtp), _cache->size())) {
flushAll();
}
//追加数据到最后
_cache->emplace_back(rtp);
}
virtual void onFlushAudio(std::shared_ptr<packet_list> &) = 0;
private:
void flushAll() {
if (_cache->empty()) {
return;
}
onFlushAudio(_cache);
_cache = std::make_shared<packet_list>();
}
private:
policy _policy;
std::shared_ptr<packet_list> _cache;
};
} /* namespace mediakit */
#endif //ZLMEDIAKIT_MEDIASOURCE_H
\ No newline at end of file
......@@ -298,8 +298,69 @@ void MultiMediaSourceMuxer::resetTracks() {
_muxer->resetTracks();
}
//该类实现frame级别的时间戳覆盖
class FrameModifyStamp : public Frame{
public:
typedef std::shared_ptr<FrameModifyStamp> Ptr;
FrameModifyStamp(const Frame::Ptr &frame, Stamp &stamp){
_frame = frame;
//覆盖时间戳
stamp.revise(frame->dts(), frame->pts(), _dts, _pts, true);
}
~FrameModifyStamp() override {}
uint32_t dts() const override{
return _dts;
}
uint32_t pts() const override{
return _pts;
}
uint32_t prefixSize() const override {
return _frame->prefixSize();
}
bool keyFrame() const override {
return _frame->keyFrame();
}
bool configFrame() const override {
return _frame->configFrame();
}
bool cacheAble() const override {
return _frame->cacheAble();
}
char *data() const override {
return _frame->data();
}
uint32_t size() const override {
return _frame->size();
}
CodecId getCodecId() const override {
return _frame->getCodecId();
}
private:
Frame::Ptr _frame;
int64_t _dts;
int64_t _pts;
};
void MultiMediaSourceMuxer::inputFrame(const Frame::Ptr &frame) {
GET_CONFIG(bool,modify_stamp,General::kModifyStamp);
if(!modify_stamp){
//未开启时间戳覆盖
_muxer->inputFrame(frame);
}else{
//开启了时间戳覆盖
FrameModifyStamp::Ptr new_frame = std::make_shared<FrameModifyStamp>(frame,_stamp[frame->getTrackType()]);
//输入时间戳覆盖后的帧
_muxer->inputFrame(new_frame);
}
}
bool MultiMediaSourceMuxer::isEnabled(){
......
......@@ -178,6 +178,7 @@ public:
private:
MultiMuxerPrivate::Ptr _muxer;
std::weak_ptr<MediaSourceEvent> _listener;
Stamp _stamp[2];
};
}//namespace mediakit
......
......@@ -67,6 +67,7 @@ const string kPublishToRtxp = GENERAL_FIELD"publishToRtxp";
const string kPublishToHls = GENERAL_FIELD"publishToHls";
const string kPublishToMP4 = GENERAL_FIELD"publishToMP4";
const string kMergeWriteMS = GENERAL_FIELD"mergeWriteMS";
const string kModifyStamp = GENERAL_FIELD"modifyStamp";
onceToken token([](){
mINI::Instance()[kFlowThreshold] = 1024;
......@@ -79,6 +80,7 @@ onceToken token([](){
mINI::Instance()[kPublishToHls] = 1;
mINI::Instance()[kPublishToMP4] = 0;
mINI::Instance()[kMergeWriteMS] = 0;
mINI::Instance()[kModifyStamp] = 0;
},nullptr);
}//namespace General
......@@ -293,3 +295,10 @@ const string kBenchmarkMode = "benchmark_mode";
} // namespace mediakit
void Assert_Throw(int failed, const char *exp, const char *func, const char *file, int line){
if(failed) {
_StrPrinter printer;
printer << "Assertion failed: (" << exp << "), function " << func << ", file " << file << ", line " << line << ".";
throw std::runtime_error(printer);
}
}
......@@ -174,6 +174,8 @@ extern const string kPublishToMP4 ;
//合并写缓存大小(单位毫秒),合并写指服务器缓存一定的数据后才会一次性写入socket,这样能提高性能,但是会提高延时
//开启后会同时关闭TCP_NODELAY并开启MSG_MORE
extern const string kMergeWriteMS ;
//全局的时间戳覆盖开关,在转协议时,对frame进行时间戳覆盖
extern const string kModifyStamp;
}//namespace General
......@@ -217,6 +219,7 @@ extern const string kDirectProxy;
////////////RTMP服务器配置///////////
namespace Rtmp {
//rtmp推流时间戳覆盖开关
extern const string kModifyStamp;
//握手超时时间,默认15秒
extern const string kHandshakeSecond;
......
......@@ -9,65 +9,63 @@
*/
#include "AAC.h"
#ifdef ENABLE_MP4
#include "mpeg4-aac.h"
#endif
namespace mediakit{
void writeAdtsHeader(const AACFrame &hed, uint8_t *pcAdts) {
pcAdts[0] = (hed.syncword >> 4 & 0xFF); //8bit
pcAdts[1] = (hed.syncword << 4 & 0xF0); //4 bit
pcAdts[1] |= (hed.id << 3 & 0x08); //1 bit
pcAdts[1] |= (hed.layer << 1 & 0x06); //2bit
pcAdts[1] |= (hed.protection_absent & 0x01); //1 bit
pcAdts[2] = (hed.profile << 6 & 0xC0); // 2 bit
pcAdts[2] |= (hed.sf_index << 2 & 0x3C); //4bit
pcAdts[2] |= (hed.private_bit << 1 & 0x02); //1 bit
pcAdts[2] |= (hed.channel_configuration >> 2 & 0x03); //1 bit
pcAdts[3] = (hed.channel_configuration << 6 & 0xC0); // 2 bit
pcAdts[3] |= (hed.original << 5 & 0x20); //1 bit
pcAdts[3] |= (hed.home << 4 & 0x10); //1 bit
pcAdts[3] |= (hed.copyright_identification_bit << 3 & 0x08); //1 bit
pcAdts[3] |= (hed.copyright_identification_start << 2 & 0x04); //1 bit
pcAdts[3] |= (hed.aac_frame_length >> 11 & 0x03); //2 bit
pcAdts[4] = (hed.aac_frame_length >> 3 & 0xFF); //8 bit
pcAdts[5] = (hed.aac_frame_length << 5 & 0xE0); //3 bit
pcAdts[5] |= (hed.adts_buffer_fullness >> 6 & 0x1F); //5 bit
pcAdts[6] = (hed.adts_buffer_fullness << 2 & 0xFC); //6 bit
pcAdts[6] |= (hed.no_raw_data_blocks_in_frame & 0x03); //2 bit
unsigned const samplingFrequencyTable[16] = { 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350, 0, 0, 0 };
class AdtsHeader{
public:
unsigned int syncword = 0; //12 bslbf 同步字The bit string ‘1111 1111 1111’,说明一个ADTS帧的开始
unsigned int id; //1 bslbf MPEG 标示符, 设置为1
unsigned int layer; //2 uimsbf Indicates which layer is used. Set to ‘00’
unsigned int protection_absent; //1 bslbf 表示是否误码校验
unsigned int profile; //2 uimsbf 表示使用哪个级别的AAC,如01 Low Complexity(LC)--- AACLC
unsigned int sf_index; //4 uimsbf 表示使用的采样率下标
unsigned int private_bit; //1 bslbf
unsigned int channel_configuration; //3 uimsbf 表示声道数
unsigned int original; //1 bslbf
unsigned int home; //1 bslbf
//下面的为改变的参数即每一帧都不同
unsigned int copyright_identification_bit; //1 bslbf
unsigned int copyright_identification_start; //1 bslbf
unsigned int aac_frame_length; // 13 bslbf 一个ADTS帧的长度包括ADTS头和raw data block
unsigned int adts_buffer_fullness; //11 bslbf 0x7FF 说明是码率可变的码流
//no_raw_data_blocks_in_frame 表示ADTS帧中有number_of_raw_data_blocks_in_frame + 1个AAC原始帧.
//所以说number_of_raw_data_blocks_in_frame == 0
//表示说ADTS帧中有一个AAC数据块并不是说没有。(一个AAC原始帧包含一段时间内1024个采样及相关数据)
unsigned int no_raw_data_blocks_in_frame; //2 uimsfb
};
static void dumpAdtsHeader(const AdtsHeader &hed, uint8_t *out) {
out[0] = (hed.syncword >> 4 & 0xFF); //8bit
out[1] = (hed.syncword << 4 & 0xF0); //4 bit
out[1] |= (hed.id << 3 & 0x08); //1 bit
out[1] |= (hed.layer << 1 & 0x06); //2bit
out[1] |= (hed.protection_absent & 0x01); //1 bit
out[2] = (hed.profile << 6 & 0xC0); // 2 bit
out[2] |= (hed.sf_index << 2 & 0x3C); //4bit
out[2] |= (hed.private_bit << 1 & 0x02); //1 bit
out[2] |= (hed.channel_configuration >> 2 & 0x03); //1 bit
out[3] = (hed.channel_configuration << 6 & 0xC0); // 2 bit
out[3] |= (hed.original << 5 & 0x20); //1 bit
out[3] |= (hed.home << 4 & 0x10); //1 bit
out[3] |= (hed.copyright_identification_bit << 3 & 0x08); //1 bit
out[3] |= (hed.copyright_identification_start << 2 & 0x04); //1 bit
out[3] |= (hed.aac_frame_length >> 11 & 0x03); //2 bit
out[4] = (hed.aac_frame_length >> 3 & 0xFF); //8 bit
out[5] = (hed.aac_frame_length << 5 & 0xE0); //3 bit
out[5] |= (hed.adts_buffer_fullness >> 6 & 0x1F); //5 bit
out[6] = (hed.adts_buffer_fullness << 2 & 0xFC); //6 bit
out[6] |= (hed.no_raw_data_blocks_in_frame & 0x03); //2 bit
}
string makeAdtsConfig(const uint8_t *pcAdts){
if (!(pcAdts[0] == 0xFF && (pcAdts[1] & 0xF0) == 0xF0)) {
return "";
}
// Get and check the 'profile':
unsigned char profile = (pcAdts[2] & 0xC0) >> 6; // 2 bits
if (profile == 3) {
return "";
}
// Get and check the 'sampling_frequency_index':
unsigned char sampling_frequency_index = (pcAdts[2] & 0x3C) >> 2; // 4 bits
if (samplingFrequencyTable[sampling_frequency_index] == 0) {
return "";
}
// Get and check the 'channel_configuration':
unsigned char channel_configuration = ((pcAdts[2] & 0x01) << 2)
| ((pcAdts[3] & 0xC0) >> 6); // 3 bits
unsigned char audioSpecificConfig[2];
unsigned char const audioObjectType = profile + 1;
audioSpecificConfig[0] = (audioObjectType << 3) | (sampling_frequency_index >> 1);
audioSpecificConfig[1] = (sampling_frequency_index << 7) | (channel_configuration << 3);
return string((char *)audioSpecificConfig,2);
}
void makeAdtsHeader(const string &strAudioCfg,AACFrame &adts) {
uint8_t cfg1 = strAudioCfg[0];
uint8_t cfg2 = strAudioCfg[1];
static void parseAacConfig(const string &config, AdtsHeader &adts) {
uint8_t cfg1 = config[0];
uint8_t cfg2 = config[1];
int audioObjectType;
int sampling_frequency_index;
......@@ -93,9 +91,83 @@ void makeAdtsHeader(const string &strAudioCfg,AACFrame &adts) {
adts.adts_buffer_fullness = 2047;
adts.no_raw_data_blocks_in_frame = 0;
}
void getAACInfo(const AACFrame &adts,int &iSampleRate,int &iChannel){
iSampleRate = samplingFrequencyTable[adts.sf_index];
iChannel = adts.channel_configuration;
string makeAacConfig(const uint8_t *hex, int length){
#ifndef ENABLE_MP4
if (!(hex[0] == 0xFF && (hex[1] & 0xF0) == 0xF0)) {
return "";
}
// Get and check the 'profile':
unsigned char profile = (hex[2] & 0xC0) >> 6; // 2 bits
if (profile == 3) {
return "";
}
// Get and check the 'sampling_frequency_index':
unsigned char sampling_frequency_index = (hex[2] & 0x3C) >> 2; // 4 bits
if (samplingFrequencyTable[sampling_frequency_index] == 0) {
return "";
}
// Get and check the 'channel_configuration':
unsigned char channel_configuration = ((hex[2] & 0x01) << 2) | ((hex[3] & 0xC0) >> 6); // 3 bits
unsigned char audioSpecificConfig[2];
unsigned char const audioObjectType = profile + 1;
audioSpecificConfig[0] = (audioObjectType << 3) | (sampling_frequency_index >> 1);
audioSpecificConfig[1] = (sampling_frequency_index << 7) | (channel_configuration << 3);
return string((char *)audioSpecificConfig,2);
#else
struct mpeg4_aac_t aac = {0};
if (mpeg4_aac_adts_load(hex, length, &aac) > 0) {
char buf[32] = {0};
int len = mpeg4_aac_audio_specific_config_save(&aac, (uint8_t *) buf, sizeof(buf));
if (len > 0) {
return string(buf, len);
}
}
WarnL << "生成aac config失败, adts header:" << hexdump(hex, length);
return "";
#endif
}
int dumpAacConfig(const string &config, int length, uint8_t *out, int out_size) {
#ifndef ENABLE_MP4
AdtsHeader header;
parseAacConfig(config, header);
header.aac_frame_length = length;
dumpAdtsHeader(header, out);
return ADTS_HEADER_LEN;
#else
struct mpeg4_aac_t aac = {0};
int ret = mpeg4_aac_audio_specific_config_load((uint8_t *) config.data(), config.size(), &aac);
if (ret > 0) {
ret = mpeg4_aac_adts_save(&aac, length, out, out_size);
}
if (ret < 0) {
WarnL << "生成adts头失败:" << ret << ", aac config:" << hexdump(config.data(), config.size());
}
return ret;
#endif
}
bool parseAacConfig(const string &config, int &samplerate, int &channels){
#ifndef ENABLE_MP4
AdtsHeader header;
parseAacConfig(config, header);
samplerate = samplingFrequencyTable[header.sf_index];
channels = header.channel_configuration;
return true;
#else
struct mpeg4_aac_t aac = {0};
int ret = mpeg4_aac_audio_specific_config_load((uint8_t *) config.data(), config.size(), &aac);
if (ret > 0) {
samplerate = aac.sampling_frequency;
channels = aac.channels;
return true;
}
WarnL << "获取aac采样率、声道数失败:" << hexdump(config.data(), config.size());
return false;
#endif
}
Sdp::Ptr AACTrack::getSdp() {
......@@ -103,9 +175,7 @@ Sdp::Ptr AACTrack::getSdp() {
WarnL << getCodecName() << " Track未准备好";
return nullptr;
}
return std::make_shared<AACSdp>(getAacCfg(),getAudioSampleRate());
return std::make_shared<AACSdp>(getAacCfg(),getAudioSampleRate(), getAudioChannel());
}
}//namespace mediakit
\ No newline at end of file
......@@ -13,98 +13,34 @@
#include "Frame.h"
#include "Track.h"
#define ADTS_HEADER_LEN 7
namespace mediakit{
class AACFrame;
unsigned const samplingFrequencyTable[16] = { 96000, 88200,
64000, 48000,
44100, 32000,
24000, 22050,
16000, 12000,
11025, 8000,
7350, 0, 0, 0 };
void makeAdtsHeader(const string &strAudioCfg,AACFrame &adts);
void writeAdtsHeader(const AACFrame &adts, uint8_t *pcAdts) ;
string makeAdtsConfig(const uint8_t *pcAdts);
void getAACInfo(const AACFrame &adts,int &iSampleRate,int &iChannel);
string makeAacConfig(const uint8_t *hex, int length);
int dumpAacConfig(const string &config, int length, uint8_t *out, int out_size);
bool parseAacConfig(const string &config, int &samplerate, int &channels);
/**
* aac帧,包含adts头
*/
class AACFrame : public Frame {
class AACFrame : public FrameImp {
public:
typedef std::shared_ptr<AACFrame> Ptr;
char *data() const override{
return (char *)buffer;
}
uint32_t size() const override {
return aac_frame_length;
}
uint32_t dts() const override {
return timeStamp;
}
uint32_t prefixSize() const override{
return iPrefixSize;
}
TrackType getTrackType() const override{
return TrackAudio;
}
CodecId getCodecId() const override{
return CodecAAC;
AACFrame(){
_codecid = CodecAAC;
}
};
bool keyFrame() const override {
return false;
}
bool configFrame() const override{
return false;
}
public:
unsigned int syncword = 0; //12 bslbf 同步字The bit string ‘1111 1111 1111’,说明一个ADTS帧的开始
unsigned int id; //1 bslbf MPEG 标示符, 设置为1
unsigned int layer; //2 uimsbf Indicates which layer is used. Set to ‘00’
unsigned int protection_absent; //1 bslbf 表示是否误码校验
unsigned int profile; //2 uimsbf 表示使用哪个级别的AAC,如01 Low Complexity(LC)--- AACLC
unsigned int sf_index; //4 uimsbf 表示使用的采样率下标
unsigned int private_bit; //1 bslbf
unsigned int channel_configuration; //3 uimsbf 表示声道数
unsigned int original; //1 bslbf
unsigned int home; //1 bslbf
//下面的为改变的参数即每一帧都不同
unsigned int copyright_identification_bit; //1 bslbf
unsigned int copyright_identification_start; //1 bslbf
unsigned int aac_frame_length; // 13 bslbf 一个ADTS帧的长度包括ADTS头和raw data block
unsigned int adts_buffer_fullness; //11 bslbf 0x7FF 说明是码率可变的码流
//no_raw_data_blocks_in_frame 表示ADTS帧中有number_of_raw_data_blocks_in_frame + 1个AAC原始帧.
//所以说number_of_raw_data_blocks_in_frame == 0
//表示说ADTS帧中有一个AAC数据块并不是说没有。(一个AAC原始帧包含一段时间内1024个采样及相关数据)
unsigned int no_raw_data_blocks_in_frame; //2 uimsfb
unsigned char buffer[2 * 1024 + 7];
uint32_t timeStamp;
uint32_t iPrefixSize = 7;
} ;
class AACFrameNoCacheAble : public FrameNoCacheAble {
class AACFrameNoCacheAble : public FrameFromPtr {
public:
typedef std::shared_ptr<AACFrameNoCacheAble> Ptr;
AACFrameNoCacheAble(char *ptr,uint32_t size,uint32_t dts,uint32_t pts = 0,int prefixeSize = 7){
AACFrameNoCacheAble(char *ptr,uint32_t size,uint32_t dts,uint32_t pts = 0,int prefix_size = ADTS_HEADER_LEN){
_ptr = ptr;
_size = size;
_dts = dts;
_prefixSize = prefixeSize;
}
TrackType getTrackType() const override{
return TrackAudio;
_prefix_size = prefix_size;
}
CodecId getCodecId() const override{
......@@ -118,8 +54,7 @@ public:
bool configFrame() const override{
return false;
}
} ;
};
/**
* aac音频通道
......@@ -136,44 +71,25 @@ public:
/**
* 构造aac类型的媒体
* @param aac_cfg aac两个字节的配置信息
* @param aac_cfg aac配置信息
*/
AACTrack(const string &aac_cfg){
if(aac_cfg.size() < 2){
throw std::invalid_argument("adts配置必须最少2个字节");
}
_cfg = aac_cfg.substr(0,2);
onReady();
setAacCfg(aac_cfg);
}
/**
* 构造aac类型的媒体
* @param adts_header adts头,7个字节
* @param adts_header_len adts头长度,不少于7个字节
* 设置aac 配置信息
*/
AACTrack(const char *adts_header,int adts_header_len = 7){
if(adts_header_len < 7){
throw std::invalid_argument("adts头必须不少于7个字节");
}
_cfg = makeAdtsConfig((uint8_t*)adts_header);
onReady();
}
/**
* 构造aac类型的媒体
* @param aac_frame_with_adts 带adts头的aac帧
*/
AACTrack(const Frame::Ptr &aac_frame_with_adts){
if(aac_frame_with_adts->getCodecId() != CodecAAC || aac_frame_with_adts->prefixSize() < 7){
throw std::invalid_argument("必须输入带adts头的aac帧");
void setAacCfg(const string &aac_cfg){
if (aac_cfg.size() < 2) {
throw std::invalid_argument("adts配置必须最少2个字节");
}
_cfg = makeAdtsConfig((uint8_t*)aac_frame_with_adts->data());
_cfg = aac_cfg;
onReady();
}
/**
* 获取aac两个字节的配置
* @return
* 获取aac 配置信息
*/
const string &getAacCfg() const{
return _cfg;
......@@ -181,7 +97,6 @@ public:
/**
* 返回编码类型
* @return
*/
CodecId getCodecId() const override{
return CodecAAC;
......@@ -189,30 +104,27 @@ public:
/**
* 在获取aac_cfg前是无效的Track
* @return
*/
bool ready() override {
return !_cfg.empty();
}
/**
* 返回音频采样率
* @return
*/
int getAudioSampleRate() const override{
return _sampleRate;
}
/**
* 返回音频采样位数,一般为16或8
* @return
*/
int getAudioSampleBit() const override{
return _sampleBit;
}
/**
* 返回音频通道数
* @return
*/
int getAudioChannel() const override{
return _channel;
......@@ -225,9 +137,9 @@ public:
void inputFrame(const Frame::Ptr &frame) override{
if (_cfg.empty()) {
//未获取到aac_cfg信息
if (frame->prefixSize() >= 7) {
if (frame->prefixSize()) {
//7个字节的adts头
_cfg = makeAdtsConfig((uint8_t *)(frame->data()));
_cfg = makeAacConfig((uint8_t *) (frame->data()), frame->prefixSize());
onReady();
} else {
WarnL << "无法获取adts头!";
......@@ -240,13 +152,12 @@ private:
* 解析2个字节的aac配置
*/
void onReady(){
if(_cfg.size() < 2){
if (_cfg.size() < 2) {
return;
}
AACFrame aacFrame;
makeAdtsHeader(_cfg,aacFrame);
getAACInfo(aacFrame,_sampleRate,_channel);
parseAacConfig(_cfg, _sampleRate, _channel);
}
Track::Ptr clone() override {
return std::make_shared<std::remove_reference<decltype(*this)>::type >(*this);
}
......@@ -260,43 +171,42 @@ private:
int _channel = 0;
};
/**
* aac类型SDP
*/
* aac类型SDP
*/
class AACSdp : public Sdp {
public:
/**
*
* 构造函数
* @param aac_cfg aac两个字节的配置描述
* @param sample_rate 音频采样率
* @param playload_type rtp playload type 默认98
* @param payload_type rtp payload type 默认98
* @param bitrate 比特率
*/
AACSdp(const string &aac_cfg,
int sample_rate,
int playload_type = 98,
int bitrate = 128) : Sdp(sample_rate,playload_type){
_printer << "m=audio 0 RTP/AVP " << playload_type << "\r\n";
int channels,
int payload_type = 98,
int bitrate = 128) : Sdp(sample_rate,payload_type){
_printer << "m=audio 0 RTP/AVP " << payload_type << "\r\n";
_printer << "b=AS:" << bitrate << "\r\n";
_printer << "a=rtpmap:" << playload_type << " MPEG4-GENERIC/" << sample_rate << "\r\n";
_printer << "a=rtpmap:" << payload_type << " MPEG4-GENERIC/" << sample_rate << "/" << channels << "\r\n";
char configStr[32] = {0};
snprintf(configStr, sizeof(configStr), "%02X%02X", (uint8_t)aac_cfg[0], (uint8_t)aac_cfg[1]);
_printer << "a=fmtp:" << playload_type << " streamtype=5;profile-level-id=1;mode=AAC-hbr;"
<< "sizelength=13;indexlength=3;indexdeltalength=3;config="
<< configStr << "\r\n";
_printer << "a=control:trackID=" << getTrackType() << "\r\n";
string configStr;
char buf[4] = {0};
for(auto &ch : aac_cfg){
snprintf(buf, sizeof(buf), "%02X", (uint8_t)ch);
configStr.append(buf);
}
_printer << "a=fmtp:" << payload_type << " streamtype=5;profile-level-id=1;mode=AAC-hbr;"
<< "sizelength=13;indexlength=3;indexdeltalength=3;config=" << configStr << "\r\n";
_printer << "a=control:trackID=" << (int)TrackAudio << "\r\n";
}
string getSdp() const override {
return _printer;
}
TrackType getTrackType() const override {
return TrackAudio;
}
CodecId getCodecId() const override {
return CodecAAC;
}
......@@ -305,6 +215,4 @@ private:
};
}//namespace mediakit
#endif //ZLMEDIAKIT_AAC_H
\ No newline at end of file
......@@ -13,18 +13,6 @@
namespace mediakit{
AACRtmpDecoder::AACRtmpDecoder() {
_adts = obtainFrame();
}
AACFrame::Ptr AACRtmpDecoder::obtainFrame() {
//从缓存池重新申请对象,防止覆盖已经写入环形缓存的对象
auto frame = ResourcePoolHelper<AACFrame>::obtainObj();
frame->aac_frame_length = 7;
frame->iPrefixSize = 7;
return frame;
}
static string getAacCfg(const RtmpPacket &thiz) {
string ret;
if (thiz.getMediaType() != FLV_CODEC_AAC) {
......@@ -37,11 +25,11 @@ static string getAacCfg(const RtmpPacket &thiz) {
WarnL << "bad aac cfg!";
return ret;
}
ret = thiz.strBuf.substr(2, 2);
ret = thiz.strBuf.substr(2);
return ret;
}
bool AACRtmpDecoder::inputRtmp(const RtmpPacket::Ptr &pkt, bool key_pos) {
bool AACRtmpDecoder::inputRtmp(const RtmpPacket::Ptr &pkt, bool) {
if (pkt->isCfgFrame()) {
_aac_cfg = getAacCfg(*pkt);
return false;
......@@ -52,26 +40,28 @@ bool AACRtmpDecoder::inputRtmp(const RtmpPacket::Ptr &pkt, bool key_pos) {
return false;
}
void AACRtmpDecoder::onGetAAC(const char* pcData, int iLen, uint32_t ui32TimeStamp) {
if(iLen + 7 > sizeof(_adts->buffer)){
WarnL << "Illegal adts data, exceeding the length limit.";
return;
}
//写adts结构头
makeAdtsHeader(_aac_cfg,*_adts);
void AACRtmpDecoder::onGetAAC(const char* data, int len, uint32_t stamp) {
auto frame = ResourcePoolHelper<AACFrame>::obtainObj();
//拷贝aac负载
memcpy(_adts->buffer + 7, pcData, iLen);
_adts->aac_frame_length = 7 + iLen;
_adts->timeStamp = ui32TimeStamp;
//生成adts头
char adts_header[32] = {0};
auto size = dumpAacConfig(_aac_cfg, len, (uint8_t *) adts_header, sizeof(adts_header));
if (size > 0) {
frame->_buffer.assign(adts_header, size);
frame->_prefix_size = size;
} else {
frame->_buffer.clear();
frame->_prefix_size = 0;
}
//adts结构头转成头7个字节
writeAdtsHeader(*_adts, _adts->buffer);
//追加负载数据
frame->_buffer.append(data, len);
frame->_dts = stamp;
//写入环形缓存
RtmpCodec::inputFrame(_adts);
_adts = obtainFrame();
RtmpCodec::inputFrame(frame);
}
/////////////////////////////////////////////////////////////////////////////////////
AACRtmpEncoder::AACRtmpEncoder(const Track::Ptr &track) {
......@@ -91,9 +81,9 @@ void AACRtmpEncoder::makeConfigPacket() {
void AACRtmpEncoder::inputFrame(const Frame::Ptr &frame) {
if (_aac_cfg.empty()) {
if (frame->prefixSize() >= 7) {
if (frame->prefixSize()) {
//包含adts头,从adts头获取aac配置信息
_aac_cfg = makeAdtsConfig((uint8_t *)(frame->data()));
_aac_cfg = makeAacConfig((uint8_t *) (frame->data()), frame->prefixSize());
}
makeConfigPacket();
}
......
......@@ -23,7 +23,7 @@ class AACRtmpDecoder : public RtmpCodec , public ResourcePoolHelper<AACFrame> {
public:
typedef std::shared_ptr<AACRtmpDecoder> Ptr;
AACRtmpDecoder();
AACRtmpDecoder() {}
~AACRtmpDecoder() {}
/**
......@@ -33,19 +33,14 @@ public:
*/
bool inputRtmp(const RtmpPacket::Ptr &Rtmp, bool key_pos = false) override;
TrackType getTrackType() const override{
return TrackAudio;
}
CodecId getCodecId() const override{
return CodecAAC;
}
protected:
void onGetAAC(const char* pcData, int iLen, uint32_t ui32TimeStamp);
AACFrame::Ptr obtainFrame();
protected:
AACFrame::Ptr _adts;
private:
void onGetAAC(const char *data, int len, uint32_t stamp);
private:
string _aac_cfg;
};
......@@ -76,11 +71,14 @@ public:
* 生成config包
*/
void makeConfigPacket() override;
private:
void makeAudioConfigPkt();
private:
uint8_t _audio_flv_flags;
AACTrack::Ptr _track;
string _aac_cfg;
};
}//namespace mediakit
......
......@@ -9,19 +9,19 @@
*/
#include "AACRtp.h"
#define ADTS_HEADER_LEN 7
#define AAC_MAX_FRAME_SIZE (2 * 1024)
namespace mediakit{
AACRtpEncoder::AACRtpEncoder(uint32_t ui32Ssrc,
uint32_t ui32MtuSize,
uint32_t ui32SampleRate,
uint8_t ui8PlayloadType,
uint8_t ui8PayloadType,
uint8_t ui8Interleaved) :
RtpInfo(ui32Ssrc,
ui32MtuSize,
ui32SampleRate,
ui8PlayloadType,
ui8PayloadType,
ui8Interleaved){
}
......@@ -56,32 +56,30 @@ void AACRtpEncoder::inputFrame(const Frame::Ptr &frame) {
}
void AACRtpEncoder::makeAACRtp(const void *data, unsigned int len, bool mark, uint32_t uiStamp) {
RtpCodec::inputRtp(makeRtp(getTrackType(),data,len,mark,uiStamp), false);
RtpCodec::inputRtp(makeRtp(getTrackType(), data, len, mark, uiStamp), false);
}
/////////////////////////////////////////////////////////////////////////////////////
AACRtpDecoder::AACRtpDecoder(const Track::Ptr &track){
AACRtpDecoder::AACRtpDecoder(const Track::Ptr &track) {
auto aacTrack = dynamic_pointer_cast<AACTrack>(track);
if(!aacTrack || !aacTrack->ready()){
if (!aacTrack || !aacTrack->ready()) {
WarnL << "该aac track无效!";
}else{
} else {
_aac_cfg = aacTrack->getAacCfg();
}
_adts = obtainFrame();
_frame = obtainFrame();
}
AACRtpDecoder::AACRtpDecoder() {
_adts = obtainFrame();
_frame = obtainFrame();
}
AACFrame::Ptr AACRtpDecoder::obtainFrame() {
//从缓存池重新申请对象,防止覆盖已经写入环形缓存的对象
auto frame = ResourcePoolHelper<AACFrame>::obtainObj();
frame->aac_frame_length = ADTS_HEADER_LEN;
frame->iPrefixSize = ADTS_HEADER_LEN;
if(frame->syncword == 0 && !_aac_cfg.empty()) {
makeAdtsHeader(_aac_cfg,*frame);
}
frame->_prefix_size = 0;
frame->_buffer.clear();
return frame;
}
......@@ -96,20 +94,18 @@ bool AACRtpDecoder::inputRtp(const RtpPacket::Ptr &rtppack, bool key_pos) {
//忽略Au-Header区
ptr += 2 + au_header_count * 2;
static const uint32_t max_size = sizeof(AACFrame::buffer) - ADTS_HEADER_LEN;
while (ptr < end) {
auto size = (uint32_t) (end - ptr);
if(size > max_size){
size = max_size;
if (size > AAC_MAX_FRAME_SIZE) {
size = AAC_MAX_FRAME_SIZE;
}
if (_adts->aac_frame_length + size > sizeof(AACFrame::buffer)) {
if (_frame->size() + size > AAC_MAX_FRAME_SIZE) {
//数据太多了,先清空
flushData();
}
//追加aac数据
memcpy(_adts->buffer + _adts->aac_frame_length, ptr, size);
_adts->aac_frame_length += size;
_adts->timeStamp = rtppack->timeStamp;
_frame->_buffer.append((char *) ptr, size);
_frame->_dts = rtppack->timeStamp;
ptr += size;
}
......@@ -120,15 +116,22 @@ bool AACRtpDecoder::inputRtp(const RtpPacket::Ptr &rtppack, bool key_pos) {
return false;
}
void AACRtpDecoder::flushData() {
if(_adts->aac_frame_length == ADTS_HEADER_LEN){
if (_frame->_buffer.empty()) {
//没有有效数据
return;
}
writeAdtsHeader(*_adts, _adts->buffer);
RtpCodec::inputFrame(_adts);
_adts = obtainFrame();
//插入adts头
char adts_header[32] = {0};
auto size = dumpAacConfig(_aac_cfg, _frame->_buffer.size(), (uint8_t *) adts_header, sizeof(adts_header));
if (size > 0) {
//插入adts头
_frame->_buffer.insert(0, adts_header, size);
_frame->_prefix_size = size;
}
RtpCodec::inputFrame(_frame);
_frame = obtainFrame();
}
......
......@@ -31,19 +31,19 @@ public:
*/
bool inputRtp(const RtpPacket::Ptr &rtp, bool key_pos = false) override;
TrackType getTrackType() const override{
return TrackAudio;
}
CodecId getCodecId() const override{
CodecId getCodecId() const override {
return CodecAAC;
}
protected:
AACRtpDecoder();
private:
AACFrame::Ptr obtainFrame();
void flushData();
private:
AACFrame::Ptr _adts;
AACFrame::Ptr _frame;
string _aac_cfg;
};
......@@ -59,13 +59,13 @@ public:
* @param ui32Ssrc ssrc
* @param ui32MtuSize mtu 大小
* @param ui32SampleRate 采样率
* @param ui8PlayloadType pt类型
* @param ui8PayloadType pt类型
* @param ui8Interleaved rtsp interleaved 值
*/
AACRtpEncoder(uint32_t ui32Ssrc,
uint32_t ui32MtuSize,
uint32_t ui32SampleRate,
uint8_t ui8PlayloadType = 97,
uint8_t ui8PayloadType = 97,
uint8_t ui8Interleaved = TrackAudio * 2);
~AACRtpEncoder() {}
......@@ -74,8 +74,10 @@ public:
* @param frame 带dats头的aac数据
*/
void inputFrame(const Frame::Ptr &frame) override;
private:
void makeAACRtp(const void *pData, unsigned int uiLen, bool bMark, uint32_t uiStamp);
private:
unsigned char _aucSectionBuf[1600];
};
......
......@@ -33,17 +33,12 @@ Track::Ptr Factory::getTrackBySdp(const SdpTrack::Ptr &track) {
return nullptr;
}
string aac_cfg;
unsigned int cfg1;
sscanf(aac_cfg_str.substr(0, 2).data(), "%02X", &cfg1);
cfg1 &= 0x00FF;
aac_cfg.push_back(cfg1);
unsigned int cfg2;
sscanf(aac_cfg_str.substr(2, 2).data(), "%02X", &cfg2);
cfg2 &= 0x00FF;
aac_cfg.push_back(cfg2);
for(int i = 0 ; i < aac_cfg_str.size() / 2 ; ++i ){
unsigned int cfg;
sscanf(aac_cfg_str.substr(i * 2, 2).data(), "%02X", &cfg);
cfg &= 0x00FF;
aac_cfg.push_back((char)cfg);
}
return std::make_shared<AACTrack>(aac_cfg);
}
......@@ -115,7 +110,7 @@ RtpCodec::Ptr Factory::getRtpEncoderBySdp(const Sdp::Ptr &sdp) {
}
auto mtu = (sdp->getTrackType() == TrackVideo ? video_mtu : audio_mtu);
auto sample_rate = sdp->getSampleRate();
auto pt = sdp->getPlayloadType();
auto pt = sdp->getPayloadType();
auto interleaved = sdp->getTrackType() * 2;
auto codec_id = sdp->getCodecId();
switch (codec_id){
......@@ -221,13 +216,27 @@ Track::Ptr Factory::getAudioTrackByAmf(const AMFValue& amf, int sample_rate, int
return getTrackByCodecId(codecId, sample_rate, channels, sample_bit);
}
RtmpCodec::Ptr Factory::getRtmpCodecByTrack(const Track::Ptr &track) {
RtmpCodec::Ptr Factory::getRtmpCodecByTrack(const Track::Ptr &track, bool is_encode) {
switch (track->getCodecId()){
case CodecH264 : return std::make_shared<H264RtmpEncoder>(track);
case CodecAAC : return std::make_shared<AACRtmpEncoder>(track);
case CodecH265 : return std::make_shared<H265RtmpEncoder>(track);
case CodecG711A :
case CodecG711U : return std::make_shared<G711RtmpEncoder>(track);
case CodecG711U : {
auto audio_track = dynamic_pointer_cast<AudioTrack>(track);
if (is_encode && (audio_track->getAudioSampleRate() != 8000 ||
audio_track->getAudioChannel() != 1 ||
audio_track->getAudioSampleBit() != 16)) {
//rtmp对g711只支持8000/1/16规格,但是ZLMediaKit可以解析其他规格的G711
WarnL << "RTMP只支持8000/1/16规格的G711,目前规格是:"
<< audio_track->getAudioSampleRate() << "/"
<< audio_track->getAudioChannel() << "/"
<< audio_track->getAudioSampleBit()
<< ",该音频已被忽略";
return nullptr;
}
return std::make_shared<G711RtmpEncoder>(track);
}
default : WarnL << "暂不支持该CodecId:" << track->getCodecName(); return nullptr;
}
}
......
......@@ -59,8 +59,9 @@ public:
/**
* 根据Track获取Rtmp的编解码器
* @param track 媒体描述对象
* @param is_encode 是否为编码器还是解码器
*/
static RtmpCodec::Ptr getRtmpCodecByTrack(const Track::Ptr &track);
static RtmpCodec::Ptr getRtmpCodecByTrack(const Track::Ptr &track, bool is_encode);
/**
* 根据codecId获取rtmp的codec描述
......
......@@ -15,6 +15,59 @@ using namespace toolkit;
namespace mediakit{
/**
 * Converts a non-cacheable frame into a cacheable one
 */
class FrameCacheAble : public FrameFromPtr {
public:
    typedef std::shared_ptr<FrameCacheAble> Ptr;

    // Snapshot the given frame so it can safely outlive its producer.
    // If the source frame is already cacheable we only keep a reference to it;
    // otherwise its payload is deep-copied into an owned buffer.
    FrameCacheAble(const Frame::Ptr &frame){
        if(frame->cacheAble()){
            //frame owns its data; holding the shared_ptr keeps it alive
            _frame = frame;
            _ptr = frame->data();
        }else{
            //frame data may be reused by the producer; copy it into our own buffer
            _buffer = std::make_shared<BufferRaw>();
            _buffer->assign(frame->data(),frame->size());
            _ptr = _buffer->data();
        }
        //copy all metadata from the source frame
        _size = frame->size();
        _dts = frame->dts();
        _pts = frame->pts();
        _prefix_size = frame->prefixSize();
        _codecid = frame->getCodecId();
        _key = frame->keyFrame();
        _config = frame->configFrame();
    }

    virtual ~FrameCacheAble() = default;

    /**
     * This frame can be cached
     */
    bool cacheAble() const override {
        return true;
    }

    CodecId getCodecId() const override{
        return _codecid;
    }

    bool keyFrame() const override{
        return _key;
    }

    bool configFrame() const override{
        return _config;
    }

private:
    Frame::Ptr _frame;      //held when the source frame was already cacheable
    BufferRaw::Ptr _buffer; //owned copy when the source frame was not cacheable
    CodecId _codecid;
    bool _key;
    bool _config;
};
Frame::Ptr Frame::getCacheAbleFrame(const Frame::Ptr &frame){
if(frame->cacheAble()){
return frame;
......@@ -23,17 +76,35 @@ Frame::Ptr Frame::getCacheAbleFrame(const Frame::Ptr &frame){
}
#define SWITCH_CASE(codec_id) case codec_id : return #codec_id
const char *CodecInfo::getCodecName() {
switch (getCodecId()) {
const char *getCodecName(CodecId codecId) {
switch (codecId) {
SWITCH_CASE(CodecH264);
SWITCH_CASE(CodecH265);
SWITCH_CASE(CodecAAC);
SWITCH_CASE(CodecG711A);
SWITCH_CASE(CodecG711U);
default:
return "unknown codec";
SWITCH_CASE(CodecOpus);
default : return "unknown codec";
}
}
}//namespace mediakit
/**
 * Map a codec id to its media (track) type.
 * @param codecId codec identifier
 * @return TrackVideo / TrackAudio, or TrackInvalid for unknown codecs
 */
TrackType getTrackType(CodecId codecId){
    //video codecs
    if (codecId == CodecH264 || codecId == CodecH265) {
        return TrackVideo;
    }
    //audio codecs
    if (codecId == CodecAAC || codecId == CodecG711A ||
        codecId == CodecG711U || codecId == CodecOpus) {
        return TrackAudio;
    }
    //anything else is not a recognized codec
    return TrackInvalid;
}
// Returns the printable name of this object's codec (delegates to the free function).
const char *CodecInfo::getCodecName() {
    return mediakit::getCodecName(getCodecId());
}
// Returns the media type (audio/video) of this object's codec (delegates to the free function).
TrackType CodecInfo::getTrackType() {
    return mediakit::getTrackType(getCodecId());
}
}//namespace mediakit
......@@ -28,6 +28,7 @@ typedef enum {
CodecAAC,
CodecG711A,
CodecG711U,
CodecOpus,
CodecMax = 0x7FFF
} CodecId;
......@@ -40,6 +41,16 @@ typedef enum {
} TrackType;
/**
* 获取编码器名称
*/
const char *getCodecName(CodecId codecId);
/**
* 获取音视频类型
*/
TrackType getTrackType(CodecId codecId);
/**
* 编码信息的抽象接口
*/
class CodecInfo {
......@@ -50,20 +61,19 @@ public:
virtual ~CodecInfo(){}
/**
* 获取音视频类型
*/
virtual TrackType getTrackType() const = 0;
/**
* 获取编解码器类型
*/
virtual CodecId getCodecId() const = 0;
/**
* 获取编码器名称
* @return 编码器名称
*/
const char *getCodecName();
/**
* 获取音视频类型
*/
TrackType getTrackType();
};
/**
......@@ -76,15 +86,11 @@ public:
/**
* 返回解码时间戳,单位毫秒
* @return
*/
virtual uint32_t dts() const = 0;
/**
* 返回显示时间戳,单位毫秒
* @return
*/
virtual uint32_t pts() const {
return dts();
......@@ -98,13 +104,11 @@ public:
/**
* 返回是否为关键帧
* @return
*/
virtual bool keyFrame() const = 0;
/**
* 是否为配置帧,譬如sps pps vps
* @return
*/
virtual bool configFrame() const = 0;
......@@ -115,14 +119,77 @@ public:
/**
* 返回可缓存的frame
* @return
*/
static Ptr getCacheAbleFrame(const Ptr &frame);
};
// Generic owning frame implementation: payload lives in an internal string buffer.
class FrameImp : public Frame {
public:
    typedef std::shared_ptr<FrameImp> Ptr;

    char *data() const override{
        return (char *)_buffer.data();
    }

    uint32_t size() const override {
        return _buffer.size();
    }

    //decode timestamp, milliseconds
    uint32_t dts() const override {
        return _dts;
    }

    //presentation timestamp; falls back to dts when _pts is unset (0)
    uint32_t pts() const override{
        return _pts ? _pts : _dts;
    }

    //length of the payload prefix stored at the front of _buffer
    //(e.g. an Annex-B start code or an ADTS header — set by the producer)
    uint32_t prefixSize() const override{
        return _prefix_size;
    }

    CodecId getCodecId() const override{
        return _codecid;
    }

    //generic frames are not key frames; codec-specific subclasses override this
    bool keyFrame() const override {
        return false;
    }

    //generic frames are not config frames (sps/pps/vps); subclasses override this
    bool configFrame() const override{
        return false;
    }

public:
    CodecId _codecid = CodecInvalid;
    string _buffer;            //frame payload, including the prefix bytes
    uint32_t _dts = 0;         //decode timestamp (ms)
    uint32_t _pts = 0;         //presentation timestamp (ms); 0 means "same as dts"
    uint32_t _prefix_size = 0; //number of prefix bytes at the start of _buffer
};
/**
 * A single Frame object may contain several frames separated by 0x00 00 01.
 * ZLMediaKit splits such a compound frame into individual frames before processing.
 * A compound frame can be cut into multiple sub-Frames without copying memory;
 * this class exists to avoid memory copies when splitting, improving performance.
 */
// Wraps a slice of a parent frame's buffer as its own Frame without copying.
// Holding a shared_ptr to the parent keeps the underlying memory alive for
// as long as this sub-frame exists.
template<typename Parent>
class FrameInternal : public Parent{
public:
    typedef std::shared_ptr<FrameInternal> Ptr;

    // @param parent_frame frame whose buffer the (ptr,size) slice points into
    // @param ptr          start of the sub-frame within the parent's buffer
    // @param size         length of the sub-frame in bytes
    // @param prefix_size  length of the start-code prefix at the head of the slice
    FrameInternal(const Frame::Ptr &parent_frame, char *ptr, uint32_t size, int prefix_size)
        : Parent(ptr, size, parent_frame->dts(), parent_frame->pts(), prefix_size) {
        _parent_frame = parent_frame;
    }

    //cacheable only if the parent frame is cacheable (we borrow its memory)
    bool cacheAble() const override {
        return _parent_frame->cacheAble();
    }

private:
    Frame::Ptr _parent_frame;
};
/**
* 循环池辅助类
* @tparam T
*/
template <typename T>
class ResourcePoolHelper{
......@@ -140,17 +207,16 @@ private:
};
/**
* 写帧接口的抽闲接口
* 写帧接口的抽象接口类
*/
class FrameWriterInterface {
public:
typedef std::shared_ptr<FrameWriterInterface> Ptr;
FrameWriterInterface(){}
virtual ~FrameWriterInterface(){}
/**
* 写入帧数据
* @param frame 帧
*/
virtual void inputFrame(const Frame::Ptr &frame) = 0;
};
......@@ -165,15 +231,15 @@ public:
/**
* inputFrame后触发onWriteFrame回调
* @param cb
*/
FrameWriterInterfaceHelper(const onWriteFrame& cb){
_writeCallback = cb;
}
virtual ~FrameWriterInterfaceHelper(){}
/**
* 写入帧数据
* @param frame 帧
*/
void inputFrame(const Frame::Ptr &frame) override {
_writeCallback(frame);
......@@ -182,7 +248,6 @@ private:
onWriteFrame _writeCallback;
};
/**
* 支持代理转发的帧环形缓存
*/
......@@ -193,6 +258,9 @@ public:
FrameDispatcher(){}
virtual ~FrameDispatcher(){}
/**
* 添加代理
*/
void addDelegate(const FrameWriterInterface::Ptr &delegate){
//_delegates_write可能多线程同时操作
lock_guard<mutex> lck(_mtx);
......@@ -200,7 +268,10 @@ public:
_need_update = true;
}
void delDelegate(void *ptr){
/**
* 删除代理
*/
void delDelegate(FrameWriterInterface *ptr){
//_delegates_write可能多线程同时操作
lock_guard<mutex> lck(_mtx);
_delegates_write.erase(ptr);
......@@ -208,8 +279,7 @@ public:
}
/**
* 写入帧数据
* @param frame 帧
* 写入帧并派发
*/
void inputFrame(const Frame::Ptr &frame) override{
if(_need_update){
......@@ -223,7 +293,13 @@ public:
for(auto &pr : _delegates_read){
pr.second->inputFrame(frame);
}
}
/**
 * Return the number of registered delegates.
 * NOTE(review): reads _delegates_write without taking _mtx — presumably a
 * stale count is acceptable to callers; confirm if exact values matter.
 */
int size() const {
    return _delegates_write.size();
}
private:
mutex _mtx;
......@@ -250,105 +326,23 @@ public:
}
uint32_t pts() const override{
if(_pts){
return _pts;
}
return dts();
return _pts ? _pts : dts();
}
uint32_t prefixSize() const override{
return _prefixSize;
return _prefix_size;
}
bool cacheAble() const override {
return false;
}
protected:
char *_ptr;
uint32_t _size;
uint32_t _dts;
uint32_t _pts = 0;
uint32_t _prefixSize;
uint32_t _prefix_size;
};
/**
* 不可缓存的帧,在DevChannel类中有用到。
* 该帧类型用于防止内存拷贝,直接使用指针传递数据
* 在大多数情况下,ZLMediaKit是同步对帧数据进行使用和处理的
* 所以提供此类型的帧很有必要,但是有时又无法避免缓存帧做后续处理
* 所以可以通过Frame::getCacheAbleFrame方法拷贝一个可缓存的帧
*/
class FrameNoCacheAble : public FrameFromPtr{
public:
typedef std::shared_ptr<FrameNoCacheAble> Ptr;
/**
* 该帧不可缓存
* @return
*/
bool cacheAble() const override {
return false;
}
};
/**
* 该对象的功能是把一个不可缓存的帧转换成可缓存的帧
* @see FrameNoCacheAble
*/
class FrameCacheAble : public FrameFromPtr {
public:
typedef std::shared_ptr<FrameCacheAble> Ptr;
FrameCacheAble(const Frame::Ptr &frame){
if(frame->cacheAble()){
_frame = frame;
_ptr = frame->data();
}else{
_buffer = std::make_shared<BufferRaw>();
_buffer->assign(frame->data(),frame->size());
_ptr = _buffer->data();
}
_size = frame->size();
_dts = frame->dts();
_pts = frame->pts();
_prefixSize = frame->prefixSize();
_trackType = frame->getTrackType();
_codec = frame->getCodecId();
_key = frame->keyFrame();
_config = frame->configFrame();
}
virtual ~FrameCacheAble() = default;
/**
* 可以被缓存
* @return
*/
bool cacheAble() const override {
return true;
}
TrackType getTrackType() const override{
return _trackType;
}
CodecId getCodecId() const override{
return _codec;
}
bool keyFrame() const override{
return _key;
}
bool configFrame() const override{
return _config;
}
private:
Frame::Ptr _frame;
BufferRaw::Ptr _buffer;
TrackType _trackType;
CodecId _codec;
bool _key;
bool _config;
};
}//namespace mediakit
#endif //ZLMEDIAKIT_FRAME_H
\ No newline at end of file
......@@ -19,76 +19,28 @@ namespace mediakit{
/**
* G711帧
*/
class G711Frame : public Frame {
class G711Frame : public FrameImp {
public:
typedef std::shared_ptr<G711Frame> Ptr;
char *data() const override{
return (char *)buffer.data();
}
uint32_t size() const override {
return buffer.size();
}
uint32_t dts() const override {
return timeStamp;
}
uint32_t prefixSize() const override{
return 0;
}
TrackType getTrackType() const override{
return TrackAudio;
}
CodecId getCodecId() const override{
return _codecId;
}
bool keyFrame() const override {
return false;
}
bool configFrame() const override{
return false;
G711Frame(){
_codecid = CodecG711A;
}
public:
CodecId _codecId = CodecG711A;
string buffer;
uint32_t timeStamp;
} ;
};
class G711FrameNoCacheAble : public FrameNoCacheAble {
class G711FrameNoCacheAble : public FrameFromPtr {
public:
typedef std::shared_ptr<G711FrameNoCacheAble> Ptr;
//兼容通用接口
G711FrameNoCacheAble(char *ptr,uint32_t size,uint32_t dts, uint32_t pts = 0,int prefixeSize = 0){
G711FrameNoCacheAble(char *ptr,uint32_t size,uint32_t dts, uint32_t pts = 0,int prefix_size = 0){
_ptr = ptr;
_size = size;
_dts = dts;
_prefixSize = prefixeSize;
_prefix_size = prefix_size;
}
//兼容通用接口
void setCodec(CodecId codecId){
_codecId = codecId;
}
G711FrameNoCacheAble(CodecId codecId, char *ptr,uint32_t size,uint32_t dts,int prefixeSize = 0){
_codecId = codecId;
_ptr = ptr;
_size = size;
_dts = dts;
_prefixSize = prefixeSize;
}
TrackType getTrackType() const override{
return TrackAudio;
}
CodecId getCodecId() const override{
return _codecId;
}
......@@ -108,67 +60,18 @@ private:
/**
* G711音频通道
*/
class G711Track : public AudioTrack{
class G711Track : public AudioTrackImp{
public:
typedef std::shared_ptr<G711Track> Ptr;
/**
* G711A G711U
*/
G711Track(CodecId codecId,int sample_rate, int channels, int sample_bit){
_codecid = codecId;
_sample_rate = sample_rate;
_channels = channels;
_sample_bit = sample_bit;
}
/**
* 返回编码类型
*/
CodecId getCodecId() const override{
return _codecid;
}
/**
* 是否已经初始化
*/
bool ready() override {
return true;
}
/**
* 返回音频采样率
*/
int getAudioSampleRate() const override{
return _sample_rate;
}
/**
* 返回音频采样位数,一般为16或8
*/
int getAudioSampleBit() const override{
return _sample_bit;
}
/**
* 返回音频通道数
*/
int getAudioChannel() const override{
return _channels;
}
G711Track(CodecId codecId,int sample_rate, int channels, int sample_bit) : AudioTrackImp(codecId,sample_rate,channels,sample_bit){}
private:
//克隆该Track
Track::Ptr clone() override {
return std::make_shared<std::remove_reference<decltype(*this)>::type >(*this);
}
//生成sdp
Sdp::Ptr getSdp() override ;
private:
CodecId _codecid;
int _sample_rate;
int _channels;
int _sample_bit;
};
/**
......@@ -180,37 +83,30 @@ public:
* G711采样率固定为8000
* @param codecId G711A G711U
* @param sample_rate 音频采样率
* @param playload_type rtp playload
* @param payload_type rtp payload
* @param bitrate 比特率
*/
G711Sdp(CodecId codecId,
int sample_rate,
int channels,
int playload_type = 98,
int bitrate = 128) : Sdp(sample_rate,playload_type), _codecId(codecId){
_printer << "m=audio 0 RTP/AVP " << playload_type << "\r\n";
_printer << "a=rtpmap:" << playload_type << (codecId == CodecG711A ? " PCMA/" : " PCMU/") << sample_rate << "/" << channels << "\r\n";
_printer << "a=control:trackID=" << getTrackType() << "\r\n";
int payload_type = 98,
int bitrate = 128) : Sdp(sample_rate,payload_type), _codecId(codecId){
_printer << "m=audio 0 RTP/AVP " << payload_type << "\r\n";
_printer << "a=rtpmap:" << payload_type << (codecId == CodecG711A ? " PCMA/" : " PCMU/") << sample_rate << "/" << channels << "\r\n";
_printer << "a=control:trackID=" << (int)TrackAudio << "\r\n";
}
string getSdp() const override {
return _printer;
}
TrackType getTrackType() const override {
return TrackAudio;
}
CodecId getCodecId() const override {
return _codecId;
}
private:
_StrPrinter _printer;
CodecId _codecId;
};
}//namespace mediakit
#endif //ZLMEDIAKIT_AAC_H
#endif //ZLMEDIAKIT_G711_H
\ No newline at end of file
......@@ -20,15 +20,15 @@ G711RtmpDecoder::G711RtmpDecoder(CodecId codecId) {
G711Frame::Ptr G711RtmpDecoder::obtainFrame() {
//从缓存池重新申请对象,防止覆盖已经写入环形缓存的对象
auto frame = ResourcePoolHelper<G711Frame>::obtainObj();
frame->buffer.clear();
frame->_codecId = _codecId;
frame->_buffer.clear();
frame->_codecid = _codecId;
return frame;
}
bool G711RtmpDecoder::inputRtmp(const RtmpPacket::Ptr &pkt, bool) {
//拷贝G711负载
_frame->buffer.assign(pkt->strBuf.data() + 1, pkt->strBuf.size() - 1);
_frame->timeStamp = pkt->timeStamp;
_frame->_buffer.assign(pkt->strBuf.data() + 1, pkt->strBuf.size() - 1);
_frame->_dts = pkt->timeStamp;
//写入环形缓存
RtmpCodec::inputFrame(_frame);
_frame = obtainFrame();
......
......@@ -33,10 +33,6 @@ public:
*/
bool inputRtmp(const RtmpPacket::Ptr &Rtmp, bool key_pos = false) override;
TrackType getTrackType() const override{
return TrackAudio;
}
CodecId getCodecId() const override{
return _codecId;
}
......
......@@ -20,9 +20,9 @@ G711RtpDecoder::G711RtpDecoder(const Track::Ptr &track){
G711Frame::Ptr G711RtpDecoder::obtainFrame() {
//从缓存池重新申请对象,防止覆盖已经写入环形缓存的对象
auto frame = ResourcePoolHelper<G711Frame>::obtainObj();
frame->buffer.clear();
frame->_codecId = _codecid;
frame->timeStamp = 0;
frame->_buffer.clear();
frame->_codecid = _codecid;
frame->_dts = 0;
return frame;
}
......@@ -32,17 +32,17 @@ bool G711RtpDecoder::inputRtp(const RtpPacket::Ptr &rtppack, bool) {
// 获取rtp数据
const char *rtp_packet_buf = rtppack->data() + rtppack->offset;
if (rtppack->timeStamp != _frame->timeStamp) {
if (rtppack->timeStamp != _frame->_dts) {
//时间戳变更,清空上一帧
onGetG711(_frame);
}
//追加数据
_frame->buffer.append(rtp_packet_buf, length);
_frame->_buffer.append(rtp_packet_buf, length);
//赋值时间戳
_frame->timeStamp = rtppack->timeStamp;
_frame->_dts = rtppack->timeStamp;
if (rtppack->mark || _frame->buffer.size() > 10 * 1024) {
if (rtppack->mark || _frame->_buffer.size() > 10 * 1024) {
//标记为mark时,或者内存快溢出时,我们认为这是该帧最后一个包
onGetG711(_frame);
}
......@@ -50,7 +50,7 @@ bool G711RtpDecoder::inputRtp(const RtpPacket::Ptr &rtppack, bool) {
}
void G711RtpDecoder::onGetG711(const G711Frame::Ptr &frame) {
if(!frame->buffer.empty()){
if(!frame->_buffer.empty()){
//写入环形缓存
RtpCodec::inputFrame(frame);
_frame = obtainFrame();
......@@ -62,12 +62,12 @@ void G711RtpDecoder::onGetG711(const G711Frame::Ptr &frame) {
G711RtpEncoder::G711RtpEncoder(uint32_t ui32Ssrc,
uint32_t ui32MtuSize,
uint32_t ui32SampleRate,
uint8_t ui8PlayloadType,
uint8_t ui8PayloadType,
uint8_t ui8Interleaved) :
RtpInfo(ui32Ssrc,
ui32MtuSize,
ui32SampleRate,
ui8PlayloadType,
ui8PayloadType,
ui8Interleaved) {
}
......@@ -96,6 +96,3 @@ void G711RtpEncoder::makeG711Rtp(const void *data, unsigned int len, bool mark,
}
}//namespace mediakit
......@@ -31,10 +31,6 @@ public:
*/
bool inputRtp(const RtpPacket::Ptr &rtp, bool key_pos = false) override;
TrackType getTrackType() const override{
return TrackAudio;
}
CodecId getCodecId() const override{
return _codecid;
}
......@@ -62,13 +58,13 @@ public:
* @param ui32Ssrc ssrc
* @param ui32MtuSize mtu 大小
* @param ui32SampleRate 采样率
* @param ui8PlayloadType pt类型
* @param ui8PayloadType pt类型
* @param ui8Interleaved rtsp interleaved 值
*/
G711RtpEncoder(uint32_t ui32Ssrc,
uint32_t ui32MtuSize,
uint32_t ui32SampleRate,
uint8_t ui8PlayloadType = 0,
uint8_t ui8PayloadType = 0,
uint8_t ui8Interleaved = TrackAudio * 2);
~G711RtpEncoder() {}
......
......@@ -44,34 +44,77 @@ const char *memfind(const char *buf, int len, const char *subbuf, int sublen) {
return NULL;
}
void splitH264(const char *ptr, int len, const std::function<void(const char *, int)> &cb) {
auto nal = ptr;
void splitH264(const char *ptr, int len, int prefix, const std::function<void(const char *, int, int)> &cb) {
auto start = ptr + prefix;
auto end = ptr + len;
while(true) {
auto next_nal = memfind(nal + 3,end - nal - 3,"\x0\x0\x1",3);
if(next_nal){
if(*(next_nal - 1) == 0x00){
next_nal -= 1;
int next_prefix;
while (true) {
auto next_start = memfind(start, end - start, "\x00\x00\x01", 3);
if (next_start) {
//找到下一帧
if (*(next_start - 1) == 0x00) {
//这个是00 00 00 01开头
next_start -= 1;
next_prefix = 4;
} else {
//这个是00 00 01开头
next_prefix = 3;
}
cb(nal,next_nal - nal);
nal = next_nal;
//记得加上本帧prefix长度
cb(start - prefix, next_start - start + prefix, prefix);
//搜索下一帧末尾的起始位置
start = next_start + next_prefix;
//记录下一帧的prefix长度
prefix = next_prefix;
continue;
}
cb(nal,end - nal);
//未找到下一帧,这是最后一帧
cb(start - prefix, end - start + prefix, prefix);
break;
}
}
/**
 * Return the length of the Annex-B start code at the beginning of the buffer.
 * @param ptr buffer start
 * @param len buffer length in bytes
 * @return 4 for 0x00 00 00 01, 3 for 0x00 00 01, 0 when there is no start code
 *         (or the buffer is shorter than 4 bytes)
 */
int prefixSize(const char *ptr, int len){
    if (len < 4 || ptr[0] != 0x00 || ptr[1] != 0x00) {
        //too short, or does not begin with two zero bytes
        return 0;
    }
    if (ptr[2] == 0x01) {
        //three byte start code: 00 00 01
        return 3;
    }
    if (ptr[2] == 0x00 && ptr[3] == 0x01) {
        //four byte start code: 00 00 00 01
        return 4;
    }
    return 0;
}
#if 0
//splitH264函数测试程序
static onceToken s_token([](){
{
char buf[] = "\x00\x00\x00\x01\x12\x23\x34\x45\x56"
"\x00\x00\x00\x01\x12\x23\x34\x45\x56"
"\x00\x00\x00\x01\x12\x23\x34\x45\x56"
"\x00\x00\x00\x01\x23\x34\x45\x56"
"\x00\x00\x00\x01x34\x45\x56"
"\x00\x00\x01\x12\x23\x34\x45\x56";
splitH264(buf, sizeof(buf) - 1, [](const char *ptr, int len){
cout << hexdump(ptr, len) << endl;
splitH264(buf, sizeof(buf) - 1, 4, [](const char *ptr, int len, int prefix) {
cout << prefix << " " << hexdump(ptr, len) << endl;
});
}
{
char buf[] = "\x00\x00\x00\x01\x12\x23\x34\x45\x56";
splitH264(buf, sizeof(buf) - 1, 4, [](const char *ptr, int len, int prefix) {
cout << prefix << " " << hexdump(ptr, len) << endl;
});
}
});
#endif //0
......
......@@ -20,12 +20,12 @@ using namespace toolkit;
namespace mediakit{
bool getAVCInfo(const string &strSps,int &iVideoWidth, int &iVideoHeight, float &iVideoFps);
void splitH264(const char *ptr, int len, const std::function<void(const char *, int)> &cb);
void splitH264(const char *ptr, int len, int prefix, const std::function<void(const char *, int, int)> &cb);
int prefixSize(const char *ptr, int len);
/**
* 264帧类
*/
class H264Frame : public Frame {
class H264Frame : public FrameImp {
public:
typedef std::shared_ptr<H264Frame> Ptr;
......@@ -36,30 +36,8 @@ public:
NAL_SEI = 6,
} NalType;
char *data() const override{
return (char *)_buffer.data();
}
uint32_t size() const override {
return _buffer.size();
}
uint32_t dts() const override {
return _dts;
}
uint32_t pts() const override {
return _pts ? _pts : _dts;
}
uint32_t prefixSize() const override{
return _prefix_size;
}
TrackType getTrackType() const override{
return TrackVideo;
}
CodecId getCodecId() const override{
return CodecH264;
H264Frame(){
_codecid = CodecH264;
}
bool keyFrame() const override {
......@@ -69,39 +47,27 @@ public:
bool configFrame() const override{
switch(H264_TYPE(_buffer[_prefix_size]) ){
case H264Frame::NAL_SPS:
case H264Frame::NAL_PPS:
return true;
default:
return false;
case H264Frame::NAL_PPS:return true;
default:return false;
}
}
public:
uint32_t _dts = 0;
uint32_t _pts = 0;
uint32_t _prefix_size = 4;
string _buffer;
};
/**
* 防止内存拷贝的H264类
* 用户可以通过该类型快速把一个指针无拷贝的包装成Frame类
* 该类型在DevChannel中有使用
*/
class H264FrameNoCacheAble : public FrameNoCacheAble {
class H264FrameNoCacheAble : public FrameFromPtr {
public:
typedef std::shared_ptr<H264FrameNoCacheAble> Ptr;
H264FrameNoCacheAble(char *ptr,uint32_t size,uint32_t dts , uint32_t pts ,int prefixeSize = 4){
H264FrameNoCacheAble(char *ptr,uint32_t size,uint32_t dts , uint32_t pts ,int prefix_size = 4){
_ptr = ptr;
_size = size;
_dts = dts;
_pts = pts;
_prefixSize = prefixeSize;
}
TrackType getTrackType() const override{
return TrackVideo;
_prefix_size = prefix_size;
}
CodecId getCodecId() const override{
......@@ -109,43 +75,18 @@ public:
}
bool keyFrame() const override {
return H264_TYPE(_ptr[_prefixSize]) == H264Frame::NAL_IDR;
return H264_TYPE(_ptr[_prefix_size]) == H264Frame::NAL_IDR;
}
bool configFrame() const override{
switch(H264_TYPE(_ptr[_prefixSize])){
switch(H264_TYPE(_ptr[_prefix_size])){
case H264Frame::NAL_SPS:
case H264Frame::NAL_PPS:
return true;
default:
return false;
case H264Frame::NAL_PPS:return true;
default:return false;
}
}
};
/**
* 一个H264Frame类中可以有多个帧,他们通过 0x 00 00 01 分隔
* ZLMediaKit会先把这种复合帧split成单个帧然后再处理
* 一个复合帧可以通过无内存拷贝的方式切割成多个H264FrameSubFrame
* 提供该类的目的是切换复合帧时防止内存拷贝,提高性能
*/
template<typename Parent>
class FrameInternal : public Parent{
public:
typedef std::shared_ptr<FrameInternal> Ptr;
FrameInternal(const Frame::Ptr &parent_frame,
char *ptr,
uint32_t size,
int prefixeSize) : Parent(ptr,size,parent_frame->dts(),parent_frame->pts(),prefixeSize){
_parent_frame = parent_frame;
}
bool cacheAble() const override {
return _parent_frame->cacheAble();
}
private:
Frame::Ptr _parent_frame;
};
typedef FrameInternal<H264FrameNoCacheAble> H264FrameInternal;
/**
......@@ -243,24 +184,9 @@ public:
int type = H264_TYPE(*((uint8_t *)frame->data() + frame->prefixSize()));
if(type == H264Frame::NAL_SPS || type == H264Frame::NAL_SEI){
//有些设备会把SPS PPS IDR帧当做一个帧打包,所以我们要split一下
bool first_frame = true;
splitH264(frame->data() + frame->prefixSize(),
frame->size() - frame->prefixSize(),
[&](const char *ptr, int len){
if(first_frame){
H264FrameInternal::Ptr sub_frame = std::make_shared<H264FrameInternal>(frame,
frame->data(),
len + frame->prefixSize(),
frame->prefixSize());
inputFrame_l(sub_frame);
first_frame = false;
}else{
H264FrameInternal::Ptr sub_frame = std::make_shared<H264FrameInternal>(frame,
(char *)ptr,
len ,
3);
splitH264(frame->data(), frame->size(), frame->prefixSize(), [&](const char *ptr, int len, int prefix) {
H264FrameInternal::Ptr sub_frame = std::make_shared<H264FrameInternal>(frame, (char *)ptr, len, prefix);
inputFrame_l(sub_frame);
}
});
} else{
inputFrame_l(frame);
......@@ -302,6 +228,11 @@ private:
}
break;
case H264Frame::NAL_SEI:{
//忽略SEI
break;
}
default:
VideoTrack::inputFrame(frame);
break;
......@@ -349,29 +280,27 @@ private:
bool _last_frame_is_idr = false;
};
/**
* h264类型sdp
*/
class H264Sdp : public Sdp {
public:
/**
*
* @param sps 264 sps,不带0x00000001头
* @param pps 264 pps,不带0x00000001头
* @param playload_type rtp playload type 默认96
* @param payload_type rtp payload type 默认96
* @param bitrate 比特率
*/
H264Sdp(const string &strSPS,
const string &strPPS,
int playload_type = 96,
int bitrate = 4000) : Sdp(90000,playload_type) {
int payload_type = 96,
int bitrate = 4000) : Sdp(90000,payload_type) {
//视频通道
_printer << "m=video 0 RTP/AVP " << playload_type << "\r\n";
_printer << "m=video 0 RTP/AVP " << payload_type << "\r\n";
_printer << "b=AS:" << bitrate << "\r\n";
_printer << "a=rtpmap:" << playload_type << " H264/" << 90000 << "\r\n";
_printer << "a=fmtp:" << playload_type << " packetization-mode=1; profile-level-id=";
_printer << "a=rtpmap:" << payload_type << " H264/" << 90000 << "\r\n";
_printer << "a=fmtp:" << payload_type << " packetization-mode=1; profile-level-id=";
char strTemp[100];
uint32_t profile_level_id = 0;
......@@ -390,17 +319,13 @@ public:
memset(strTemp, 0, 100);
av_base64_encode(strTemp, 100, (uint8_t *) strPPS.data(), strPPS.size());
_printer << strTemp << "\r\n";
_printer << "a=control:trackID=" << getTrackType() << "\r\n";
_printer << "a=control:trackID=" << (int)TrackVideo << "\r\n";
}
string getSdp() const override {
return _printer;
}
TrackType getTrackType() const override {
return TrackVideo;
}
CodecId getCodecId() const override {
return CodecH264;
}
......@@ -408,8 +333,5 @@ private:
_StrPrinter _printer;
};
}//namespace mediakit
#endif //ZLMEDIAKIT_H264_H
\ No newline at end of file
......@@ -36,10 +36,6 @@ public:
*/
bool inputRtmp(const RtmpPacket::Ptr &rtmp, bool key_pos = true) override;
TrackType getTrackType() const override{
return TrackVideo;
}
CodecId getCodecId() const override{
return CodecH264;
}
......
......@@ -157,7 +157,7 @@ bool H264RtpDecoder::decodeRtp(const RtpPacket::Ptr &rtppack) {
if (rtppack->sequence != _lastSeq + 1 && rtppack->sequence != 0) {
//中间的或末尾的rtp包,其seq必须连续(如果回环了则判定为连续),否则说明rtp丢包,那么该帧不完整,必须得丢弃
_h264frame->_buffer.clear();
WarnL << "rtp sequence不连续: " << rtppack->sequence << " != " << _lastSeq << " + 1,该帧被废弃";
WarnL << "rtp丢包: " << rtppack->sequence << " != " << _lastSeq << " + 1,该帧被废弃";
return false;
}
......@@ -204,12 +204,12 @@ void H264RtpDecoder::onGetH264(const H264Frame::Ptr &frame) {
H264RtpEncoder::H264RtpEncoder(uint32_t ui32Ssrc,
uint32_t ui32MtuSize,
uint32_t ui32SampleRate,
uint8_t ui8PlayloadType,
uint8_t ui8PayloadType,
uint8_t ui8Interleaved) :
RtpInfo(ui32Ssrc,
ui32MtuSize,
ui32SampleRate,
ui8PlayloadType,
ui8PayloadType,
ui8Interleaved) {
}
......
......@@ -38,10 +38,6 @@ public:
*/
bool inputRtp(const RtpPacket::Ptr &rtp, bool key_pos = true) override;
TrackType getTrackType() const override{
return TrackVideo;
}
CodecId getCodecId() const override{
return CodecH264;
}
......@@ -66,13 +62,13 @@ public:
* @param ui32Ssrc ssrc
* @param ui32MtuSize mtu大小
* @param ui32SampleRate 采样率,强制为90000
* @param ui8PlayloadType pt类型
* @param ui8PayloadType pt类型
* @param ui8Interleaved rtsp interleaved
*/
H264RtpEncoder(uint32_t ui32Ssrc,
uint32_t ui32MtuSize = 1400,
uint32_t ui32SampleRate = 90000,
uint8_t ui8PlayloadType = 96,
uint8_t ui8PayloadType = 96,
uint8_t ui8Interleaved = TrackVideo * 2);
~H264RtpEncoder() {}
......
......@@ -23,9 +23,9 @@ namespace mediakit {
bool getHEVCInfo(const string &strVps, const string &strSps, int &iVideoWidth, int &iVideoHeight, float &iVideoFps);
/**
* 265帧类
*/
class H265Frame : public Frame {
* 265帧类
*/
class H265Frame : public FrameImp {
public:
typedef std::shared_ptr<H265Frame> Ptr;
......@@ -60,32 +60,8 @@ public:
NAL_SEI_SUFFIX = 40,
} NaleType;
char *data() const override {
return (char *) _buffer.data();
}
uint32_t size() const override {
return _buffer.size();
}
uint32_t dts() const override {
return _dts;
}
uint32_t pts() const override {
return _pts ? _pts : _dts;
}
uint32_t prefixSize() const override {
return _prefix_size;
}
TrackType getTrackType() const override {
return TrackVideo;
}
CodecId getCodecId() const override {
return CodecH265;
H265Frame(){
_codecid = CodecH265;
}
bool keyFrame() const override {
......@@ -96,39 +72,26 @@ public:
switch(H265_TYPE(_buffer[_prefix_size])){
case H265Frame::NAL_VPS:
case H265Frame::NAL_SPS:
case H265Frame::NAL_PPS:
return true;
default:
return false;
case H265Frame::NAL_PPS : return true;
default : return false;
}
}
static bool isKeyFrame(int type) {
return type >= NAL_BLA_W_LP && type <= NAL_RSV_IRAP_VCL23;
}
public:
uint32_t _dts = 0;
uint32_t _pts = 0;
uint32_t _prefix_size = 4;
string _buffer;
};
class H265FrameNoCacheAble : public FrameNoCacheAble {
class H265FrameNoCacheAble : public FrameFromPtr {
public:
typedef std::shared_ptr<H265FrameNoCacheAble> Ptr;
H265FrameNoCacheAble(char *ptr, uint32_t size, uint32_t dts,uint32_t pts, int prefixeSize = 4) {
H265FrameNoCacheAble(char *ptr, uint32_t size, uint32_t dts,uint32_t pts, int prefix_size = 4) {
_ptr = ptr;
_size = size;
_dts = dts;
_pts = pts;
_prefixSize = prefixeSize;
}
TrackType getTrackType() const override {
return TrackVideo;
_prefix_size = prefix_size;
}
CodecId getCodecId() const override {
......@@ -136,17 +99,15 @@ public:
}
bool keyFrame() const override {
return H265Frame::isKeyFrame(H265_TYPE(((uint8_t *) _ptr)[_prefixSize]));
return H265Frame::isKeyFrame(H265_TYPE(((uint8_t *) _ptr)[_prefix_size]));
}
bool configFrame() const override{
switch(H265_TYPE(((uint8_t *) _ptr)[_prefixSize])){
switch(H265_TYPE(((uint8_t *) _ptr)[_prefix_size])){
case H265Frame::NAL_VPS:
case H265Frame::NAL_SPS:
case H265Frame::NAL_PPS:
return true;
default:
return false;
case H265Frame::NAL_PPS:return true;
default:return false;
}
}
};
......@@ -184,7 +145,6 @@ public:
/**
* 返回不带0x00 00 00 01头的vps
* @return
*/
const string &getVps() const {
return _vps;
......@@ -192,7 +152,6 @@ public:
/**
* 返回不带0x00 00 00 01头的sps
* @return
*/
const string &getSps() const {
return _sps;
......@@ -200,7 +159,6 @@ public:
/**
* 返回不带0x00 00 00 01头的pps
* @return
*/
const string &getPps() const {
return _pps;
......@@ -212,7 +170,6 @@ public:
/**
* 返回视频高度
* @return
*/
int getVideoHeight() const override{
return _height ;
......@@ -220,7 +177,6 @@ public:
/**
* 返回视频宽度
* @return
*/
int getVideoWidth() const override{
return _width;
......@@ -228,7 +184,6 @@ public:
/**
* 返回视频fps
* @return
*/
float getVideoFps() const override{
return _fps;
......@@ -238,34 +193,18 @@ public:
return !_vps.empty() && !_sps.empty() && !_pps.empty();
}
/**
* 输入数据帧,并获取sps pps
* @param frame 数据帧
*/
void inputFrame(const Frame::Ptr &frame) override{
int type = H265_TYPE(*((uint8_t *)frame->data() + frame->prefixSize()));
if(frame->configFrame()){
bool first_frame = true;
splitH264(frame->data() + frame->prefixSize(),
frame->size() - frame->prefixSize(),
[&](const char *ptr, int len){
if(first_frame){
H265FrameInternal::Ptr sub_frame = std::make_shared<H265FrameInternal>(frame,
frame->data(),
len + frame->prefixSize(),
frame->prefixSize());
inputFrame_l(sub_frame);
first_frame = false;
}else{
H265FrameInternal::Ptr sub_frame = std::make_shared<H265FrameInternal>(frame,
(char *)ptr,
len ,
3);
if(frame->configFrame() || type == H265Frame::NAL_SEI_PREFIX){
splitH264(frame->data(), frame->size(), frame->prefixSize(), [&](const char *ptr, int len, int prefix){
H265FrameInternal::Ptr sub_frame = std::make_shared<H265FrameInternal>(frame, (char*)ptr, len, prefix);
inputFrame_l(sub_frame);
}
});
}else{
} else {
inputFrame_l(frame);
}
}
......@@ -367,47 +306,41 @@ private:
bool _last_frame_is_idr = false;
};
/**
* h265类型sdp
*/
class H265Sdp : public Sdp {
public:
/**
*
* 构造函数
* @param sps 265 sps,不带0x00000001头
* @param pps 265 pps,不带0x00000001头
* @param playload_type rtp playload type 默认96
* @param payload_type rtp payload type 默认96
* @param bitrate 比特率
*/
H265Sdp(const string &strVPS,
const string &strSPS,
const string &strPPS,
int playload_type = 96,
int bitrate = 4000) : Sdp(90000,playload_type) {
int payload_type = 96,
int bitrate = 4000) : Sdp(90000,payload_type) {
//视频通道
_printer << "m=video 0 RTP/AVP " << playload_type << "\r\n";
_printer << "m=video 0 RTP/AVP " << payload_type << "\r\n";
_printer << "b=AS:" << bitrate << "\r\n";
_printer << "a=rtpmap:" << playload_type << " H265/" << 90000 << "\r\n";
_printer << "a=fmtp:" << playload_type << " ";
_printer << "a=rtpmap:" << payload_type << " H265/" << 90000 << "\r\n";
_printer << "a=fmtp:" << payload_type << " ";
_printer << "sprop-vps=";
_printer << encodeBase64(strVPS) << "; ";
_printer << "sprop-sps=";
_printer << encodeBase64(strSPS) << "; ";
_printer << "sprop-pps=";
_printer << encodeBase64(strPPS) << "\r\n";
_printer << "a=control:trackID=" << getTrackType() << "\r\n";
_printer << "a=control:trackID=" << (int)TrackVideo << "\r\n";
}
string getSdp() const override {
return _printer;
}
TrackType getTrackType() const override {
return TrackVideo;
}
CodecId getCodecId() const override {
return CodecH265;
}
......@@ -415,9 +348,5 @@ private:
_StrPrinter _printer;
};
}//namespace mediakit
#endif //ZLMEDIAKIT_H265_H
\ No newline at end of file
......@@ -36,10 +36,6 @@ public:
*/
bool inputRtmp(const RtmpPacket::Ptr &rtmp, bool key_pos = true) override;
TrackType getTrackType() const override{
return TrackVideo;
}
CodecId getCodecId() const override{
return CodecH265;
}
......
......@@ -96,7 +96,7 @@ bool H265RtpDecoder::decodeRtp(const RtpPacket::Ptr &rtppack) {
if (rtppack->sequence != _lastSeq + 1 && rtppack->sequence != 0) {
//中间的或末尾的rtp包,其seq必须连续(如果回环了则判定为连续),否则说明rtp丢包,那么该帧不完整,必须得丢弃
_h265frame->_buffer.clear();
WarnL << "rtp sequence不连续: " << rtppack->sequence << " != " << _lastSeq << " + 1,该帧被废弃";
WarnL << "rtp丢包: " << rtppack->sequence << " != " << _lastSeq << " + 1,该帧被废弃";
return false;
}
......@@ -140,12 +140,12 @@ void H265RtpDecoder::onGetH265(const H265Frame::Ptr &frame) {
H265RtpEncoder::H265RtpEncoder(uint32_t ui32Ssrc,
uint32_t ui32MtuSize,
uint32_t ui32SampleRate,
uint8_t ui8PlayloadType,
uint8_t ui8PayloadType,
uint8_t ui8Interleaved) :
RtpInfo(ui32Ssrc,
ui32MtuSize,
ui32SampleRate,
ui8PlayloadType,
ui8PayloadType,
ui8Interleaved) {
}
......
......@@ -39,10 +39,6 @@ public:
*/
bool inputRtp(const RtpPacket::Ptr &rtp, bool key_pos = true) override;
TrackType getTrackType() const override{
return TrackVideo;
}
CodecId getCodecId() const override{
return CodecH265;
}
......@@ -67,13 +63,13 @@ public:
* @param ui32Ssrc ssrc
* @param ui32MtuSize mtu大小
* @param ui32SampleRate 采样率,强制为90000
* @param ui8PlayloadType pt类型
* @param ui8PayloadType pt类型
* @param ui8Interleaved rtsp interleaved
*/
H265RtpEncoder(uint32_t ui32Ssrc,
uint32_t ui32MtuSize = 1400,
uint32_t ui32SampleRate = 90000,
uint8_t ui8PlayloadType = 96,
uint8_t ui8PayloadType = 96,
uint8_t ui8Interleaved = TrackVideo * 2);
~H265RtpEncoder() {}
......
/*
* Copyright (c) 2016 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#include "Opus.h"
namespace mediakit{
Sdp::Ptr OpusTrack::getSdp() {
    //A track can only be described via SDP once it is fully initialized.
    if (ready()) {
        return std::make_shared<OpusSdp>(getAudioSampleRate(), getAudioChannel());
    }
    WarnL << getCodecName() << " Track未准备好";
    return nullptr;
}
}//namespace mediakit
\ No newline at end of file
/*
* Copyright (c) 2016 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#ifndef ZLMEDIAKIT_OPUS_H
#define ZLMEDIAKIT_OPUS_H
#include "Frame.h"
#include "Track.h"
namespace mediakit{
/**
 * Opus frame (buffer-owning, inherits storage from FrameImp)
 */
class OpusFrame : public FrameImp {
public:
    typedef std::shared_ptr<OpusFrame> Ptr;
    //Tag the inherited codec id as Opus; all other state comes from FrameImp.
    OpusFrame(){
        _codecid = CodecOpus;
    }
};
/**
* 不可缓存的Opus帧
*/
class OpusFrameNoCacheAble : public FrameFromPtr {
public:
typedef std::shared_ptr<OpusFrameNoCacheAble> Ptr;
OpusFrameNoCacheAble(char *ptr,uint32_t size,uint32_t dts, uint32_t pts = 0,int prefix_size = 0){
_ptr = ptr;
_size = size;
_dts = dts;
_prefix_size = prefix_size;
}
CodecId getCodecId() const override{
return CodecOpus;
}
bool keyFrame() const override {
return false;
}
bool configFrame() const override{
return false;
}
};
/**
 * Opus audio track
 */
class OpusTrack : public AudioTrackImp{
public:
    typedef std::shared_ptr<OpusTrack> Ptr;
    OpusTrack(int sample_rate, int channels, int sample_bit) : AudioTrackImp(CodecOpus,sample_rate,channels,sample_bit){}

private:
    //Clone this track (deep copy of the most-derived type via copy ctor).
    Track::Ptr clone() override {
        return std::make_shared<std::remove_reference<decltype(*this)>::type >(*this);
    }

    //Generate the SDP description (defined in Opus.cpp).
    Sdp::Ptr getSdp() override ;
};
/**
 * Opus SDP description
 */
class OpusSdp : public Sdp {
public:
    /**
     * Build an opus SDP media section.
     * @param sample_rate audio sample rate (Hz)
     * @param channels channel count
     * @param payload_type rtp payload type, 98 by default
     * @param bitrate bitrate in kbps
     *        NOTE(review): currently accepted but never written into the SDP
     *        (no "b=AS:" line is emitted, unlike the video Sdp classes) — confirm intent.
     */
    OpusSdp(int sample_rate,
            int channels,
            int payload_type = 98,
            int bitrate = 128) : Sdp(sample_rate,payload_type){
        _printer << "m=audio 0 RTP/AVP " << payload_type << "\r\n";
        _printer << "a=rtpmap:" << payload_type << " opus/" << sample_rate << "/" << channels << "\r\n";
        _printer << "a=control:trackID=" << (int)TrackAudio << "\r\n";
    }

    string getSdp() const override {
        return _printer;
    }

    CodecId getCodecId() const override {
        return CodecOpus;
    }

private:
    _StrPrinter _printer;
};
}//namespace mediakit
#endif //ZLMEDIAKIT_OPUS_H
......@@ -65,8 +65,6 @@ class VideoTrack : public Track {
public:
typedef std::shared_ptr<VideoTrack> Ptr;
TrackType getTrackType() const override { return TrackVideo;};
/**
* 返回视频高度
* @return
......@@ -93,8 +91,6 @@ class AudioTrack : public Track {
public:
typedef std::shared_ptr<AudioTrack> Ptr;
TrackType getTrackType() const override { return TrackAudio;};
/**
* 返回音频采样率
* @return
......@@ -114,6 +110,64 @@ public:
virtual int getAudioChannel() const {return 0;};
};
/**
 * Concrete audio track that simply stores its parameters.
 */
class AudioTrackImp : public AudioTrack{
public:
    typedef std::shared_ptr<AudioTrackImp> Ptr;

    /**
     * Constructor
     * @param codecId codec type
     * @param sample_rate sample rate in Hz
     * @param channels channel count
     * @param sample_bit bits per sample, usually 16
     */
    AudioTrackImp(CodecId codecId, int sample_rate, int channels, int sample_bit)
        : _codecid(codecId)
        , _sample_rate(sample_rate)
        , _channels(channels)
        , _sample_bit(sample_bit) {}

    /**
     * Codec type of this track.
     */
    CodecId getCodecId() const override { return _codecid; }

    /**
     * This track needs no out-of-band config data, so it is always ready.
     */
    bool ready() override { return true; }

    /**
     * Audio sample rate (Hz).
     */
    int getAudioSampleRate() const override { return _sample_rate; }

    /**
     * Bits per sample, typically 16 or 8.
     */
    int getAudioSampleBit() const override { return _sample_bit; }

    /**
     * Number of audio channels.
     */
    int getAudioChannel() const override { return _channels; }

private:
    CodecId _codecid;
    int _sample_rate;
    int _channels;
    int _sample_bit;
};
class TrackSource{
public:
......@@ -123,7 +177,6 @@ public:
/**
* 获取全部的Track
* @param trackReady 是否获取全部已经准备好的Track
* @return
*/
virtual vector<Track::Ptr> getTracks(bool trackReady = true) const = 0;
......@@ -131,7 +184,6 @@ public:
* 获取特定Track
* @param type track类型
* @param trackReady 是否获取全部已经准备好的Track
* @return
*/
Track::Ptr getTrack(TrackType type , bool trackReady = true) const {
auto tracks = getTracks(trackReady);
......@@ -145,5 +197,4 @@ public:
};
}//namespace mediakit
#endif //ZLMEDIAKIT_TRACK_H
\ No newline at end of file
/*
* Copyright (c) 2020 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#include <cstdlib>
#include "HlsParser.h"
#include "Util/util.h"
#include "Common/Parser.h"
using namespace toolkit;
namespace mediakit {
/**
 * Parse an m3u8 playlist (either a media playlist with ts segments, or a
 * master playlist with nested sub-m3u8 entries).
 * @param http_url url the playlist was fetched from; used to resolve relative segment urls
 * @param m3u8 playlist body
 * @return true if the text contained the #EXTM3U marker (i.e. is a valid m3u8)
 */
bool HlsParser::parse(const string &http_url, const string &m3u8) {
    float extinf_dur = 0;
    ts_segment segment;
    map<int, ts_segment> ts_map;
    _total_dur = 0;
    _is_live = true;
    _is_m3u8_inner = false;
    int index = 0;

    auto lines = split(m3u8, "\n");
    for (auto &line : lines) {
        trim(line);
        if (line.size() < 2) {
            //too short to be a tag or a url
            continue;
        }

        //a non-tag line directly after #EXTINF or #EXT-X-STREAM-INF is a segment/sub-playlist url
        if ((_is_m3u8_inner || extinf_dur != 0) && line[0] != '#') {
            segment.duration = extinf_dur;
            if (line.find("http://") == 0 || line.find("https://") == 0) {
                //absolute url
                segment.url = line;
            } else {
                if (line.find("/") == 0) {
                    //host-relative url: keep scheme+host from http_url (find("/", 8) skips "https://")
                    segment.url = http_url.substr(0, http_url.find("/", 8)) + line;
                } else {
                    //path-relative url: replace last path component of http_url
                    segment.url = http_url.substr(0, http_url.rfind("/") + 1) + line;
                }
            }
            if (!_is_m3u8_inner) {
                //media playlist: keep ts segments in arrival order
                ts_map.emplace(index++, segment);
            } else {
                //master playlist: order nested m3u8 entries by bandwidth
                ts_map.emplace(segment.bandwidth, segment);
            }
            extinf_dur = 0;
            continue;
        }

        //any other line resets the "next line is a sub-m3u8 url" flag
        _is_m3u8_inner = false;
        if (line.find("#EXTINF:") == 0) {
            sscanf(line.data(), "#EXTINF:%f,", &extinf_dur);
            _total_dur += extinf_dur;
            continue;
        }
        static const string s_stream_inf = "#EXT-X-STREAM-INF:";
        if (line.find(s_stream_inf) == 0) {
            //master playlist entry; attributes apply to the url on the following line
            _is_m3u8_inner = true;
            auto key_val = Parser::parseArgs(line.substr(s_stream_inf.size()), ",", "=");
            segment.program_id = atoi(key_val["PROGRAM-ID"].data());
            segment.bandwidth = atoi(key_val["BANDWIDTH"].data());
            sscanf(key_val["RESOLUTION"].data(), "%dx%d", &segment.width, &segment.height);
            continue;
        }

        if (line == "#EXTM3U") {
            _is_m3u8 = true;
            continue;
        }

        if (line.find("#EXT-X-ALLOW-CACHE:") == 0) {
            _allow_cache = (line.find(":YES") != string::npos);
            continue;
        }

        if (line.find("#EXT-X-VERSION:") == 0) {
            sscanf(line.data(), "#EXT-X-VERSION:%d", &_version);
            continue;
        }

        if (line.find("#EXT-X-TARGETDURATION:") == 0) {
            sscanf(line.data(), "#EXT-X-TARGETDURATION:%d", &_target_dur);
            continue;
        }

        if (line.find("#EXT-X-MEDIA-SEQUENCE:") == 0) {
            //NOTE(review): %lld assumes int64_t == long long — true on mainstream
            //platforms, but not guaranteed by the standard; confirm on exotic targets
            sscanf(line.data(), "#EXT-X-MEDIA-SEQUENCE:%lld", &_sequence);
            continue;
        }

        if (line.find("#EXT-X-ENDLIST") == 0) {
            //endlist marker → vod, not live
            _is_live = false;
            continue;
        }
        continue;
    }

    if (_is_m3u8) {
        onParsed(_is_m3u8_inner, _sequence, ts_map);
    }
    return _is_m3u8;
}
//whether the last parsed text contained #EXTM3U
bool HlsParser::isM3u8() const {
    return _is_m3u8;
}

//live (no #EXT-X-ENDLIST) vs vod
bool HlsParser::isLive() const{
    return _is_live;
}

//#EXT-X-ALLOW-CACHE:YES
bool HlsParser::allowCache() const {
    return _allow_cache;
}

//#EXT-X-VERSION value
int HlsParser::getVersion() const {
    return _version;
}

//#EXT-X-TARGETDURATION value in seconds
int HlsParser::getTargetDur() const {
    return _target_dur;
}

//#EXT-X-MEDIA-SEQUENCE value
//NOTE(review): narrows int64_t _sequence to int — fine for realistic
//sequence numbers, but the header-declared return type loses range
int HlsParser::getSequence() const {
    return _sequence;
}

//whether the playlist is a master playlist containing nested m3u8 entries
bool HlsParser::isM3u8Inner() const {
    return _is_m3u8_inner;
}
\ No newline at end of file
/*
* Copyright (c) 2020 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#ifndef HTTP_HLSPARSER_H
#define HTTP_HLSPARSER_H
#include <string>
#include <list>
#include <map>
using namespace std;
namespace mediakit {
typedef struct{
    //segment (or sub-m3u8) url
    string url;
    //ts segment duration in seconds (#EXTINF)
    float duration;
    //////fields below only apply to nested m3u8 entries//////
    //program id (#EXT-X-STREAM-INF PROGRAM-ID)
    int program_id;
    //bandwidth in bits per second (#EXT-X-STREAM-INF BANDWIDTH)
    int bandwidth;
    //video width in pixels (#EXT-X-STREAM-INF RESOLUTION)
    int width;
    //video height in pixels (#EXT-X-STREAM-INF RESOLUTION)
    int height;
} ts_segment;
/**
 * Incremental m3u8 playlist parser; results are exposed through the getters
 * and the onParsed() callback.
 */
class HlsParser {
public:
    HlsParser(){}
    ~HlsParser(){}
    bool parse(const string &http_url,const string &m3u8);

    /**
     * Whether the #EXTM3U tag was present, i.e. the text is an m3u8 file.
     */
    bool isM3u8() const;

    /**
     * #EXT-X-ALLOW-CACHE value: whether caching is allowed.
     */
    bool allowCache() const;

    /**
     * Whether #EXT-X-ENDLIST is absent, i.e. this is a live stream (not vod).
     */
    bool isLive() const ;

    /**
     * #EXT-X-VERSION value: protocol version.
     */
    int getVersion() const;

    /**
     * #EXT-X-TARGETDURATION value.
     */
    int getTargetDur() const;

    /**
     * #EXT-X-MEDIA-SEQUENCE value: sequence number of this m3u8.
     */
    int getSequence() const;

    /**
     * Whether the playlist is a master playlist containing nested sub-m3u8 entries.
     */
    bool isM3u8Inner() const;

protected:
    //callback fired after a successful parse with the segment (or sub-m3u8) list
    virtual void onParsed(bool is_m3u8_inner,int64_t sequence,const map<int,ts_segment> &ts_list) {};

private:
    bool _is_m3u8 = false;
    bool _allow_cache = false;
    bool _is_live = true;
    int _version = 0;
    int _target_dur = 0;
    float _total_dur = 0;
    int64_t _sequence = 0;
    //whether the playlist contains nested m3u8 entries
    bool _is_m3u8_inner = false;
};
#endif //HTTP_HLSPARSER_H
/*
* Copyright (c) 2020 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#include "HlsPlayer.h"
namespace mediakit {
HlsPlayer::HlsPlayer(const EventPoller::Ptr &poller){
    //forward each ts segment produced by the splitter to the subclass hook;
    //capturing raw `this` is safe because _segment is a member and cannot outlive us
    _segment.setOnSegment([this](const char *data, uint64_t len) { onPacket(data, len); });
    _poller = poller ? poller : EventPollerPool::Instance().getPoller();
}

HlsPlayer::~HlsPlayer() {}

/**
 * Start playback: push the url onto the retry stack and kick off the request.
 */
void HlsPlayer::play(const string &strUrl) {
    _m3u8_list.emplace_back(strUrl);
    play_l();
}

//Fetch the most recently added m3u8 url; tears down when every url has failed.
void HlsPlayer::play_l(){
    if (_m3u8_list.empty()) {
        teardown_l(SockException(Err_shutdown, "所有hls url都尝试播放失败!"));
        return;
    }
    float playTimeOutSec = (*this)[Client::kTimeoutMS].as<int>() / 1000.0;
    setMethod("GET");
    if(!(*this)[kNetAdapter].empty()) {
        setNetAdapter((*this)[kNetAdapter]);
    }
    //request the most recent (deepest) url on the stack
    sendRequest(_m3u8_list.back(), playTimeOutSec);
}

//Cancel timers, drop the ts downloader and close the connection with the given reason.
void HlsPlayer::teardown_l(const SockException &ex){
    _timer.reset();
    _timer_ts.reset();
    _http_ts_player.reset();
    shutdown(ex);
}

void HlsPlayer::teardown() {
    teardown_l(SockException(Err_shutdown,"teardown"));
}
/**
 * Download/play the next ts segment in the queue.
 * @param force start the next download even if a downloader is still alive
 */
void HlsPlayer::playNextTs(bool force){
    if (_ts_list.empty()) {
        //playlist drained: refresh the m3u8 immediately
        _timer.reset();
        play_l();
        return;
    }
    if (!force && _http_ts_player && _http_ts_player->alive()) {
        //previous ts download still in progress
        return;
    }

    auto ts_duration = _ts_list.front().duration * 1000;
    weak_ptr<HlsPlayer> weakSelf = dynamic_pointer_cast<HlsPlayer>(shared_from_this());
    //ticker measures how long this segment takes to download/play
    std::shared_ptr<Ticker> ticker(new Ticker);

    _http_ts_player = std::make_shared<HttpTSPlayer>(getPoller(), false);
    _http_ts_player->setOnDisconnect([weakSelf, ticker, ts_duration](const SockException &err) {
        auto strongSelf = weakSelf.lock();
        if (!strongSelf) {
            return;
        }
        //pace playback: wait out the remainder of the segment duration (minus 500ms headroom)
        auto delay = ts_duration - 500 - ticker->elapsedTime();
        if (delay <= 0) {
            //this segment took too long; move on to the next one at once
            strongSelf->playNextTs(true);
        } else {
            //schedule the next segment after the remaining delay
            strongSelf->_timer_ts.reset(new Timer(delay / 1000.0, [weakSelf, delay]() {
                auto strongSelf = weakSelf.lock();
                if (!strongSelf) {
                    return false;
                }
                strongSelf->playNextTs(true);
                return false;
            }, strongSelf->getPoller()));
        }
    });
    _http_ts_player->setOnPacket([weakSelf](const char *data, uint64_t len) {
        auto strongSelf = weakSelf.lock();
        if (!strongSelf) {
            return;
        }
        //received a chunk of ts data
        strongSelf->onPacket_l(data, len);
    });
    _http_ts_player->setMethod("GET");
    if(!(*this)[kNetAdapter].empty()) {
        _http_ts_player->setNetAdapter((*this)[Client::kNetAdapter]);
    }
    //timeout: twice the nominal segment duration
    //NOTE(review): a playlist with EXTINF 0 yields a zero timeout — confirm downstream handling
    _http_ts_player->sendRequest(_ts_list.front().url, 2 * _ts_list.front().duration);
    _ts_list.pop_front();
}
/**
 * HlsParser callback: either a ts media playlist or a master playlist was parsed.
 * @param is_m3u8_inner true when ts_map holds nested sub-m3u8 entries (keyed by bandwidth)
 * @param sequence #EXT-X-MEDIA-SEQUENCE of the playlist
 * @param ts_map segments in play order, or sub-playlists in bandwidth order
 */
void HlsPlayer::onParsed(bool is_m3u8_inner,int64_t sequence,const map<int,ts_segment> &ts_map){
    if (!is_m3u8_inner) {
        //this is a ts media playlist
        if (_last_sequence == sequence) {
            //same playlist revision as last time: nothing new
            return;
        }

        _last_sequence = sequence;
        for (auto &pr : ts_map) {
            auto &ts = pr.second;
            if (_ts_url_cache.emplace(ts.url).second) {
                //unseen ts url: enqueue for playback
                _ts_list.emplace_back(ts);
                //also remember insertion order for cache eviction
                _ts_url_sort.emplace_back(ts.url);
            }
        }
        if (_ts_url_sort.size() > 2 * ts_map.size()) {
            //dedup cache grown too large: evict the oldest entry
            _ts_url_cache.erase(_ts_url_sort.front());
            _ts_url_sort.pop_front();
        }
        playNextTs();
    } else {
        //this is a master playlist: play the highest-bandwidth sub stream
        if (ts_map.empty()) {
            teardown_l(SockException(Err_shutdown, StrPrinter << "empty sub hls list:" + getUrl()));
            return;
        }
        _timer.reset();

        weak_ptr<HlsPlayer> weakSelf = dynamic_pointer_cast<HlsPlayer>(shared_from_this());
        //rbegin() → entry with the largest bandwidth key
        auto url = ts_map.rbegin()->second.url;
        //defer to the poller so we don't re-enter the http client from its own callback
        getPoller()->async([weakSelf, url]() {
            auto strongSelf = weakSelf.lock();
            if (strongSelf) {
                strongSelf->play(url);
            }
        }, false);
    }
}
//HTTP header received for the m3u8 request; returns -1 so the rest of the
//stream is treated as body of unknown length.
int64_t HlsPlayer::onResponseHeader(const string &status, const HttpClient::HttpHeader &headers) {
    if (status != "200" && status != "206") {
        //unexpected http status: give up on this url
        teardown_l(SockException(Err_shutdown, StrPrinter << "bad http status code:" + status));
        return 0;
    }
    //StrCaseMap::operator[] is non-const, hence the const_cast
    auto contet_type = const_cast< HttpClient::HttpHeader &>(headers)["Content-Type"];
    _is_m3u8 = (contet_type.find("application/vnd.apple.mpegurl") == 0);
    return -1;
}

//Accumulate the m3u8 body; recvedSize == size means this is the first chunk.
void HlsPlayer::onResponseBody(const char *buf, int64_t size, int64_t recvedSize, int64_t totalSize) {
    if (recvedSize == size) {
        //first chunk of a (possibly retried) response: reset the buffer
        _m3u8.clear();
    }
    _m3u8.append(buf, size);
}

//Full m3u8 downloaded: parse it and schedule the next refresh.
void HlsPlayer::onResponseCompleted() {
    if (HlsParser::parse(getUrl(), _m3u8)) {
        playDelay();
        if (_first) {
            //first successful parse → report play success exactly once
            _first = false;
            onPlayResult(SockException(Err_success, "play success"));
        }
    } else {
        teardown_l(SockException(Err_shutdown, "解析m3u8文件失败"));
    }
}

//Refresh interval for the m3u8: the target duration when known, else 1 second.
float HlsPlayer::delaySecond(){
    if (HlsParser::isM3u8() && HlsParser::getTargetDur() > 0) {
        return HlsParser::getTargetDur();
    }
    return 1;
}
//Connection to the m3u8 server dropped; decide between fail, fall back to a
//parent url, or retry.
void HlsPlayer::onDisconnect(const SockException &ex) {
    if (_first) {
        //never played successfully: report failure
        _first = false;
        onPlayResult(ex);
        return;
    }

    //deliberate shutdown (teardown or fatal error)
    if (ex.getErrCode() == Err_shutdown) {
        if (_m3u8_list.size() <= 1) {
            //no fallback url left: every url failed
            onShutdown(ex);
        } else {
            _m3u8_list.pop_back();
            //retry with the parent (previous) url on the stack
            play_l();
        }
        return;
    }

    //transient error (eof etc.): retry the m3u8 after a delay
    playDelay();
}

//Follow HTTP redirects by pushing the new url onto the retry stack.
bool HlsPlayer::onRedirectUrl(const string &url,bool temporary) {
    _m3u8_list.emplace_back(url);
    return true;
}

//Arm a one-shot timer that re-fetches the m3u8 after delaySecond().
void HlsPlayer::playDelay(){
    weak_ptr<HlsPlayer> weakSelf = dynamic_pointer_cast<HlsPlayer>(shared_from_this());
    _timer.reset(new Timer(delaySecond(), [weakSelf]() {
        auto strongSelf = weakSelf.lock();
        if (strongSelf) {
            strongSelf->play_l();
        }
        return false;
    }, getPoller()));
}

//Raw ts bytes from the segment downloader: feed the ts splitter, which in
//turn invokes onPacket() per ts packet.
void HlsPlayer::onPacket_l(const char *data, uint64_t len){
    _segment.input(data,len);
}
//////////////////////////////////////////////////////////////////////////
HlsPlayerImp::HlsPlayerImp(const EventPoller::Ptr &poller) : PlayerImp<HlsPlayer, PlayerBase>(poller) {
}

//Optional tap: receive every ts packet before it is demuxed.
void HlsPlayerImp::setOnPacket(const TSSegment::onSegment &cb){
    _on_ts = cb;
}

//ts packet from the base player: forward to the tap and the ts demuxer.
void HlsPlayerImp::onPacket(const char *data,uint64_t len) {
    if (_on_ts) {
        _on_ts(data, len);
    }

    if (!_decoder) {
        //lazily create the ts demuxer; frames come back via inputFrame()
        _decoder = DecoderImp::createDecoder(DecoderImp::decoder_ts, this);
    }

    if (_decoder) {
        _decoder->input((uint8_t *) data, len);
    }
}

//All tracks demuxed and ready: this is the real "play success" moment.
void HlsPlayerImp::onAllTrackReady() {
    PlayerImp<HlsPlayer, PlayerBase>::onPlayResult(SockException(Err_success,"play hls success"));
}

void HlsPlayerImp::onPlayResult(const SockException &ex) {
    if(ex){
        PlayerImp<HlsPlayer, PlayerBase>::onPlayResult(ex);
    }else{
        //align audio timestamps to video before pacing playback
        _stamp[TrackAudio].syncTo(_stamp[TrackVideo]);
        _ticker.resetTime();

        weak_ptr<HlsPlayerImp> weakSelf = dynamic_pointer_cast<HlsPlayerImp>(shared_from_this());
        //drain the frame cache every 50 milliseconds
        _timer = std::make_shared<Timer>(0.05, [weakSelf]() {
            auto strongSelf = weakSelf.lock();
            if (!strongSelf) {
                return false;
            }
            strongSelf->onTick();
            return true;
        }, getPoller());
    }
}

void HlsPlayerImp::onShutdown(const SockException &ex) {
    PlayerImp<HlsPlayer, PlayerBase>::onShutdown(ex);
    _timer = nullptr;
}

vector<Track::Ptr> HlsPlayerImp::getTracks(bool trackReady) const {
    return MediaSink::getTracks(trackReady);
}

//Demuxed frame from the ts decoder: re-stamp it and buffer it for paced delivery.
void HlsPlayerImp::inputFrame(const Frame::Ptr &frame) {
    //compute relative timestamps
    int64_t dts, pts;
    _stamp[frame->getTrackType()].revise(frame->dts(), frame->pts(), dts, pts);
    //cache the frame ordered by dts
    _frame_cache.emplace(dts, Frame::getCacheAbleFrame(frame));
    while (!_frame_cache.empty()) {
        if (_frame_cache.rbegin()->first - _frame_cache.begin()->first > 30 * 1000) {
            //cache spans more than 30 seconds: force-deliver the oldest frame
            MediaSink::inputFrame(_frame_cache.begin()->second);
            _frame_cache.erase(_frame_cache.begin());
            continue;
        }
        //cache within the 30 second budget
        break;
    }
}

//Timer tick: deliver every cached frame whose timestamp has come due.
void HlsPlayerImp::onTick() {
    auto it = _frame_cache.begin();
    while (it != _frame_cache.end()) {
        if (it->first > _ticker.elapsedTime()) {
            //remaining frames are not due yet (map is dts-ordered)
            break;
        }
        //deliver the expired frame downstream
        MediaSink::inputFrame(it->second);
        it = _frame_cache.erase(it);
    }
}
}//namespace mediakit
\ No newline at end of file
/*
* Copyright (c) 2020 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#ifndef HTTP_HLSPLAYER_H
#define HTTP_HLSPLAYER_H
#include <unordered_set>
#include "Util/util.h"
#include "Poller/Timer.h"
#include "Http/HttpDownloader.h"
#include "Player/MediaPlayer.h"
#include "HlsParser.h"
#include "HttpTSPlayer.h"
#include "Rtp/Decoder.h"
#include "Rtp/TSDecoder.h"
using namespace toolkit;
namespace mediakit {
/**
 * HLS player: downloads and re-downloads the m3u8 over HTTP (HttpClientImp),
 * parses it (HlsParser) and streams the ts segments to the onPacket() hook.
 */
class HlsPlayer : public HttpClientImp , public PlayerBase , public HlsParser{
public:
    HlsPlayer(const EventPoller::Ptr &poller);
    ~HlsPlayer() override;

    /**
     * Start playback.
     * @param strUrl m3u8 url
     */
    void play(const string &strUrl) override;

    /**
     * Stop playback.
     */
    void teardown() override;

protected:
    /**
     * A ts packet was received.
     * @param data ts payload
     * @param len ts packet length
     */
    virtual void onPacket(const char *data, uint64_t len) = 0;

private:
    /**
     * m3u8 parsed successfully.
     * @param is_m3u8_inner whether this is a master playlist
     * @param sequence playlist sequence number
     * @param ts_map ts segment list, or sub-m3u8 list
     */
    void onParsed(bool is_m3u8_inner,int64_t sequence,const map<int,ts_segment> &ts_map) override;

    /**
     * HTTP response header received.
     * @param status status code, e.g. "200"
     * @param headers http headers
     * @return length of the following content; -1: everything that follows is content;
     *         >=0: fixed content length. Ignored when Content-Length is present.
     */
    int64_t onResponseHeader(const string &status,const HttpHeader &headers) override;

    /**
     * HTTP content data received.
     * @param buf data pointer
     * @param size data size
     * @param recvedSize bytes received so far (including this chunk); when equal to
     *        totalSize, onResponseCompleted will fire
     * @param totalSize total content size
     */
    void onResponseBody(const char *buf,int64_t size,int64_t recvedSize,int64_t totalSize) override;

    /**
     * HTTP response fully received.
     */
    void onResponseCompleted() override;

    /**
     * HTTP connection closed.
     * @param ex close reason
     */
    void onDisconnect(const SockException &ex) override;

    /**
     * HTTP redirect received.
     * @param url redirect target
     * @param temporary whether it is a temporary redirect
     * @return whether to follow the redirect
     */
    bool onRedirectUrl(const string &url,bool temporary) override;

private:
    void playDelay();
    float delaySecond();
    void playNextTs(bool force = false);
    void teardown_l(const SockException &ex);
    void play_l();
    void onPacket_l(const char *data, uint64_t len);

private:
    struct UrlComp {
        //compare urls ignoring everything after '?'
        bool operator()(const string& __x, const string& __y) const {
            return split(__x,"?")[0] < split(__y,"?")[0];
        }
    };

private:
    //Content-Type indicated an m3u8 body (independent of HlsParser's own flag)
    bool _is_m3u8 = false;
    //true until the first play result has been reported
    bool _first = true;
    int64_t _last_sequence = -1;
    //accumulated m3u8 body of the current response
    string _m3u8;
    //m3u8 refresh timer
    Timer::Ptr _timer;
    //next-ts pacing timer
    Timer::Ptr _timer_ts;
    //pending ts segments
    list<ts_segment> _ts_list;
    //ts urls in arrival order, used to age out _ts_url_cache
    list<string> _ts_url_sort;
    //stack of m3u8 urls (original + redirects/sub playlists) for fallback
    list<string> _m3u8_list;
    //already-seen ts urls (dedup)
    set<string, UrlComp> _ts_url_cache;
    HttpTSPlayer::Ptr _http_ts_player;
    //splits the raw http body into ts packets
    TSSegment _segment;
};
/**
 * Fully-featured HLS player: demuxes the ts stream (MediaSink) and paces
 * frame delivery according to their timestamps.
 */
class HlsPlayerImp : public PlayerImp<HlsPlayer, PlayerBase> , public MediaSink{
public:
    typedef std::shared_ptr<HlsPlayerImp> Ptr;
    HlsPlayerImp(const EventPoller::Ptr &poller = nullptr);
    ~HlsPlayerImp() override {};
    //optional tap for raw ts packets before demuxing
    void setOnPacket(const TSSegment::onSegment &cb);

private:
    void onPacket(const char *data, uint64_t len) override;
    void onAllTrackReady() override;
    void onPlayResult(const SockException &ex) override;
    vector<Track::Ptr> getTracks(bool trackReady = true) const override;
    void inputFrame(const Frame::Ptr &frame) override;
    void onShutdown(const SockException &ex) override;
    //periodic (50ms) delivery of due frames
    void onTick();

private:
    TSSegment::onSegment _on_ts;
    //ts demuxer, created on first packet
    DecoderImp::Ptr _decoder;
    //frames buffered by relative dts awaiting their play time
    multimap<int64_t, Frame::Ptr> _frame_cache;
    Timer::Ptr _timer;
    //measures elapsed wall-clock play time
    Ticker _ticker;
    //timestamp normalizers, one per track type (audio/video)
    Stamp _stamp[2];
};
}//namespace mediakit
#endif //HTTP_HLSPLAYER_H
......@@ -48,8 +48,7 @@ public:
}
};
class HttpClient : public TcpClient , public HttpRequestSplitter
{
class HttpClient : public TcpClient , public HttpRequestSplitter{
public:
typedef StrCaseMap HttpHeader;
typedef std::shared_ptr<HttpClient> Ptr;
......
......@@ -13,9 +13,7 @@
#include "HttpClient.h"
#include "Util/SSLBox.h"
using namespace toolkit;
namespace mediakit {
class HttpClientImp: public TcpClientWithSSL<HttpClient> {
......@@ -28,5 +26,4 @@ protected:
};
} /* namespace mediakit */
#endif /* SRC_HTTP_HTTPCLIENTIMP_H_ */
......@@ -188,65 +188,65 @@ bool HttpSession::checkLiveFlvStream(const function<void()> &cb){
bool bClose = !strcasecmp(_parser["Connection"].data(),"close");
weak_ptr<HttpSession> weakSelf = dynamic_pointer_cast<HttpSession>(shared_from_this());
MediaSource::findAsync(_mediaInfo,weakSelf.lock(),[weakSelf,bClose,this,cb](const MediaSource::Ptr &src){
//鉴权结果回调
auto onRes = [cb, weakSelf, bClose](const string &err){
auto strongSelf = weakSelf.lock();
if (!strongSelf) {
//本对象已经销毁
return;
}
if(!err.empty()){
//播放鉴权失败
strongSelf->sendResponse("401 Unauthorized", bClose, nullptr, KeyValue(), std::make_shared<HttpStringBody>(err));
return;
}
//异步查找rtmp流
MediaSource::findAsync(strongSelf->_mediaInfo, strongSelf, [weakSelf, bClose, cb](const MediaSource::Ptr &src) {
auto strongSelf = weakSelf.lock();
if(!strongSelf){
if (!strongSelf) {
//本对象已经销毁
return;
}
auto rtmp_src = dynamic_pointer_cast<RtmpMediaSource>(src);
if(!rtmp_src){
if (!rtmp_src) {
//未找到该流
sendNotFound(bClose);
strongSelf->sendNotFound(bClose);
return;
}
//找到流了
auto onRes = [this,rtmp_src,cb](const string &err){
bool authSuccess = err.empty();
if(!authSuccess){
sendResponse("401 Unauthorized", true, nullptr, KeyValue(), std::make_shared<HttpStringBody>(err));
return ;
}
if(!cb) {
if (!cb) {
//找到rtmp源,发送http头,负载后续发送
sendResponse("200 OK", false, "video/x-flv",KeyValue(),nullptr,true);
}else{
strongSelf->sendResponse("200 OK", false, "video/x-flv", KeyValue(), nullptr, true);
} else {
//自定义发送http头
cb();
}
//http-flv直播牺牲延时提升发送性能
setSocketFlags();
try{
start(getPoller(),rtmp_src);
_is_flv_stream = true;
}catch (std::exception &ex){
//该rtmp源不存在
shutdown(SockException(Err_shutdown,"rtmp mediasource released"));
}
strongSelf->setSocketFlags();
strongSelf->start(strongSelf->getPoller(), rtmp_src);
strongSelf->_is_flv_stream = true;
});
};
weak_ptr<HttpSession> weakSelf = dynamic_pointer_cast<HttpSession>(shared_from_this());
Broadcast::AuthInvoker invoker = [weakSelf,onRes](const string &err){
Broadcast::AuthInvoker invoker = [weakSelf, onRes](const string &err) {
auto strongSelf = weakSelf.lock();
if(!strongSelf){
return;
}
strongSelf->async([weakSelf,onRes,err](){
auto strongSelf = weakSelf.lock();
if(!strongSelf){
if (!strongSelf) {
return;
}
strongSelf->async([onRes, err]() {
onRes(err);
});
};
auto flag = NoticeCenter::Instance().emitEvent(Broadcast::kBroadcastMediaPlayed,_mediaInfo,invoker,static_cast<SockInfo &>(*this));
if(!flag){
auto flag = NoticeCenter::Instance().emitEvent(Broadcast::kBroadcastMediaPlayed, _mediaInfo, invoker, static_cast<SockInfo &>(*this));
if (!flag) {
//该事件无人监听,默认不鉴权
onRes("");
}
});
return true;
}
......
/*
* Copyright (c) 2020 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#include "HttpTSPlayer.h"
namespace mediakit {
HttpTSPlayer::HttpTSPlayer(const EventPoller::Ptr &poller, bool split_ts){
    //forward split ts packets to the virtual hook; `this` is safe: _segment is a member
    _segment.setOnSegment([this](const char *data, uint64_t len) { onPacket(data, len); });
    _poller = poller ? poller : EventPollerPool::Instance().getPoller();
    //whether to split the http body into individual ts packets
    _split_ts = split_ts;
}

HttpTSPlayer::~HttpTSPlayer() {}
//HTTP response header received for the ts request; -1 means the remainder of
//the stream is the (unbounded) body.
int64_t HttpTSPlayer::onResponseHeader(const string &status, const HttpClient::HttpHeader &headers) {
    //only 200/206 replies are acceptable
    bool status_ok = (status == "200" || status == "206");
    if (!status_ok) {
        shutdown(SockException(Err_other, StrPrinter << "bad http status code:" + status));
        return 0;
    }
    //StrCaseMap::operator[] is non-const, hence the const_cast
    auto &content_type = const_cast<HttpClient::HttpHeader &>(headers)["Content-Type"];
    bool looks_like_ts = (content_type.find("video/mp2t") == 0) || (content_type.find("video/mpeg") == 0);
    if (looks_like_ts) {
        _is_ts_content = true;
    }
    //content length is unknown ahead of time
    return -1;
}
//HTTP body chunk received; first chunk is sanity-checked for the ts sync byte.
void HttpTSPlayer::onResponseBody(const char *buf, int64_t size, int64_t recvedSize, int64_t totalSize) {
    if (recvedSize == size) {
        //first chunk of the response
        if (buf[0] == TS_SYNC_BYTE) {
            //starts with a ts header (0x47)
            _is_first_packet_ts = true;
        } else {
            WarnL << "可能不是http-ts流";
        }
    }

    if (_split_ts) {
        //split the stream into 188-byte ts packets via the segmenter
        _segment.input(buf, size);
    } else {
        //hand over the raw chunk as-is
        onPacket(buf, size);
    }
}

//Whole ts file downloaded: close with a success code so the disconnect
//callback can distinguish completion from failure.
void HttpTSPlayer::onResponseCompleted() {
    shutdown(SockException(Err_success, "play completed"));
}

//Connection closed: fire the user callback exactly once.
void HttpTSPlayer::onDisconnect(const SockException &ex) {
    if (_on_disconnect) {
        _on_disconnect(ex);
        _on_disconnect = nullptr;
    }
}

//Deliver a ts packet (or raw chunk when _split_ts is false) to the user callback.
void HttpTSPlayer::onPacket(const char *data, uint64_t len) {
    if (_on_segment) {
        _on_segment(data, len);
    }
}

//Register the disconnect callback.
void HttpTSPlayer::setOnDisconnect(const HttpTSPlayer::onShutdown &cb) {
    _on_disconnect = cb;
}

//Register the ts packet callback.
void HttpTSPlayer::setOnPacket(const TSSegment::onSegment &cb) {
    _on_segment = cb;
}
}//namespace mediakit
\ No newline at end of file
/*
* Copyright (c) 2020 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#ifndef HTTP_HTTPTSPLAYER_H
#define HTTP_HTTPTSPLAYER_H
#include "Http/HttpDownloader.h"
#include "Player/MediaPlayer.h"
#include "Rtp/TSDecoder.h"
using namespace toolkit;
namespace mediakit {
//http-ts播发器,未实现ts解复用
class HttpTSPlayer : public HttpClientImp{
public:
typedef function<void(const SockException &)> onShutdown;
typedef std::shared_ptr<HttpTSPlayer> Ptr;
HttpTSPlayer(const EventPoller::Ptr &poller = nullptr, bool split_ts = true);
~HttpTSPlayer() override ;
//设置异常断开回调
void setOnDisconnect(const onShutdown &cb);
//设置接收ts包回调
void setOnPacket(const TSSegment::onSegment &cb);
protected:
///HttpClient override///
int64_t onResponseHeader(const string &status,const HttpHeader &headers) override;
void onResponseBody(const char *buf,int64_t size,int64_t recvedSize,int64_t totalSize) override;
void onResponseCompleted() override;
void onDisconnect(const SockException &ex) override ;
//收到ts包
virtual void onPacket(const char *data, uint64_t len);
private:
//是否为mpegts负载
bool _is_ts_content = false;
//第一个包是否为ts包
bool _is_first_packet_ts = false;
//是否判断是否是ts并split
bool _split_ts;
TSSegment _segment;
onShutdown _on_disconnect;
TSSegment::onSegment _on_segment;
};
}//namespace mediakit
#endif //HTTP_HTTPTSPLAYER_H
......@@ -94,6 +94,20 @@ public:
_onRecv = nullptr;
sendRequest(http_url,fTimeOutSec);
}
/**
 * Send a websocket CLOSE frame so the session shuts down gracefully.
 * Does nothing when the websocket handshake has not completed yet
 * (_onRecv is only set after a successful handshake).
 */
void closeWsClient(){
    if(!_onRecv){
        //not connected / handshake not finished
        return;
    }
    WebSocketHeader header;
    header._fin = true;
    header._reserved = 0;
    header._opcode = CLOSE;
    //client-to-server frames must be masked
    header._mask_flag = true;
    WebSocketSplitter::encode(header, nullptr);
}
protected:
//HttpClientImp override
......@@ -110,7 +124,8 @@ protected:
if(Sec_WebSocket_Accept == const_cast<HttpHeader &>(headers)["Sec-WebSocket-Accept"]){
//success
onWebSocketException(SockException());
return 0;
//后续全是websocket负载数据
return -1;
}
shutdown(SockException(Err_shutdown,StrPrinter << "Sec-WebSocket-Accept mismatch"));
return 0;
......@@ -125,6 +140,16 @@ protected:
*/
void onResponseCompleted() override {}
/**
* 接收websocket负载数据
*/
/**
 * Receive websocket payload data: once the websocket handshake succeeds,
 * the "http body" is really websocket frames, so intercept and decode them.
 */
void onResponseBody(const char *buf,int64_t size,int64_t recvedSize,int64_t totalSize) override{
    if(_onRecv){
        //handshake done: forward the frame bytes to the websocket decoder
        _onRecv(buf, size);
    }
};
//TcpClient override
/**
......@@ -168,20 +193,6 @@ protected:
}
/**
* tcp收到数据
* @param pBuf
*/
void onRecv(const Buffer::Ptr &pBuf) override{
if(_onRecv){
//完成websocket握手后,拦截websocket数据并解析
_onRecv(pBuf);
}else{
//websocket握手数据
HttpClientImp::onRecv(pBuf);
}
}
/**
* tcp连接断开
* @param ex
*/
......@@ -193,7 +204,7 @@ protected:
//WebSocketSplitter override
/**
* 收到一个webSocket数据包包头,后续将继续触发onWebSocketDecodePlayload回调
* 收到一个webSocket数据包包头,后续将继续触发onWebSocketDecodePayload回调
* @param header 数据包头
*/
void onWebSocketDecodeHeader(const WebSocketHeader &header) override{
......@@ -205,9 +216,9 @@ protected:
* @param header 数据包包头
* @param ptr 负载数据指针
* @param len 负载数据长度
* @param recved 已接收数据长度(包含本次数据长度),等于header._playload_len时则接受完毕
* @param recved 已接收数据长度(包含本次数据长度),等于header._payload_len时则接受完毕
*/
void onWebSocketDecodePlayload(const WebSocketHeader &header, const uint8_t *ptr, uint64_t len, uint64_t recved) override{
void onWebSocketDecodePayload(const WebSocketHeader &header, const uint8_t *ptr, uint64_t len, uint64_t recved) override{
_payload.append((char *)ptr,len);
}
......@@ -285,9 +296,9 @@ private:
//触发连接成功事件
_delegate.onConnect(ex);
//拦截websocket数据接收
_onRecv = [this](const Buffer::Ptr &pBuf){
_onRecv = [this](const char *data, int len){
//解析websocket数据包
this->WebSocketSplitter::decode((uint8_t*)pBuf->data(),pBuf->size());
this->WebSocketSplitter::decode((uint8_t *)data, len);
};
return;
}
......@@ -306,7 +317,7 @@ private:
private:
string _Sec_WebSocket_Key;
function<void(const Buffer::Ptr &pBuf)> _onRecv;
function<void(const char *data, int len)> _onRecv;
ClientTypeImp<ClientType,DataType> &_delegate;
string _payload;
};
......@@ -328,7 +339,9 @@ public:
WebSocketClient(ArgsType &&...args) : ClientTypeImp<ClientType,DataType>(std::forward<ArgsType>(args)...){
_wsClient.reset(new HttpWsClient<ClientType,DataType>(*this));
}
~WebSocketClient() override {}
~WebSocketClient() override {
_wsClient->closeWsClient();
}
/**
* 重载startConnect方法,
......
......@@ -161,7 +161,7 @@ protected:
* @param len
* @param recved
*/
void onWebSocketDecodePlayload(const WebSocketHeader &packet,const uint8_t *ptr,uint64_t len,uint64_t recved) override {
void onWebSocketDecodePayload(const WebSocketHeader &packet,const uint8_t *ptr,uint64_t len,uint64_t recved) override {
_remian_data.append((char *)ptr,len);
}
......@@ -205,7 +205,7 @@ protected:
* @param buffer
*/
void onWebSocketEncodeData(const Buffer::Ptr &buffer) override{
SocketHelper::send(buffer);
HttpSessionType::send(buffer);
}
private:
string _remian_data;
......
......@@ -72,16 +72,16 @@ begin_decode:
CHECK_LEN(1);
_mask_flag = (*ptr & 0x80) >> 7;
_playload_len = (*ptr & 0x7F);
_payload_len = (*ptr & 0x7F);
ptr += 1;
if (_playload_len == 126) {
if (_payload_len == 126) {
CHECK_LEN(2);
_playload_len = (*ptr << 8) | *(ptr + 1);
_payload_len = (*ptr << 8) | *(ptr + 1);
ptr += 2;
} else if (_playload_len == 127) {
} else if (_payload_len == 127) {
CHECK_LEN(8);
_playload_len = ((uint64_t) ptr[0] << (8 * 7)) |
_payload_len = ((uint64_t) ptr[0] << (8 * 7)) |
((uint64_t) ptr[1] << (8 * 6)) |
((uint64_t) ptr[2] << (8 * 5)) |
((uint64_t) ptr[3] << (8 * 4)) |
......@@ -98,9 +98,9 @@ begin_decode:
}
_got_header = true;
_mask_offset = 0;
_playload_offset = 0;
_payload_offset = 0;
onWebSocketDecodeHeader(*this);
if(_playload_len == 0){
if(_payload_len == 0){
onWebSocketDecodeComplete(*this);
}
}
......@@ -109,19 +109,19 @@ begin_decode:
uint64_t remain = len - (ptr - data);
if(remain > 0){
uint64_t playload_slice_len = remain;
if(playload_slice_len + _playload_offset > _playload_len){
playload_slice_len = _playload_len - _playload_offset;
uint64_t payload_slice_len = remain;
if(payload_slice_len + _payload_offset > _payload_len){
payload_slice_len = _payload_len - _payload_offset;
}
_playload_offset += playload_slice_len;
onPlayloadData(ptr,playload_slice_len);
_payload_offset += payload_slice_len;
onPayloadData(ptr, payload_slice_len);
if(_playload_offset == _playload_len){
if(_payload_offset == _payload_len){
onWebSocketDecodeComplete(*this);
//这是下一个包
remain -= playload_slice_len;
ptr += playload_slice_len;
remain -= payload_slice_len;
ptr += payload_slice_len;
_got_header = false;
if(remain > 0){
......@@ -138,14 +138,14 @@ begin_decode:
_remain_data.clear();
}
void WebSocketSplitter::onPlayloadData(uint8_t *ptr, uint64_t len) {
void WebSocketSplitter::onPayloadData(uint8_t *data, uint64_t len) {
if(_mask_flag){
for(int i = 0; i < len ; ++i,++ptr){
*(ptr) ^= _mask[(i + _mask_offset) % 4];
for(int i = 0; i < len ; ++i,++data){
*(data) ^= _mask[(i + _mask_offset) % 4];
}
_mask_offset = (_mask_offset + len) % 4;
}
onWebSocketDecodePlayload(*this, _mask_flag ? ptr - len : ptr, len, _playload_offset);
onWebSocketDecodePayload(*this, _mask_flag ? data - len : data, len, _payload_offset);
}
void WebSocketSplitter::encode(const WebSocketHeader &header,const Buffer::Ptr &buffer) {
......
......@@ -44,14 +44,19 @@ public:
CONTROL_RSVF = 0xF
} Type;
public:
WebSocketHeader() : _mask(4){}
WebSocketHeader() : _mask(4){
//获取_mask内部buffer的内存地址,该内存是malloc开辟的,地址为随机
uint64_t ptr = (uint64_t)(&_mask[0]);
//根据内存地址设置掩码随机数
_mask.assign((uint8_t*)(&ptr), (uint8_t*)(&ptr) + 4);
}
virtual ~WebSocketHeader(){}
public:
bool _fin;
uint8_t _reserved;
Type _opcode;
bool _mask_flag;
uint64_t _playload_len;
uint64_t _payload_len;
vector<uint8_t > _mask;
};
......@@ -62,7 +67,7 @@ public:
/**
* 输入数据以便解包webSocket数据以及处理粘包问题
* 可能触发onWebSocketDecodeHeader和onWebSocketDecodePlayload回调
* 可能触发onWebSocketDecodeHeader和onWebSocketDecodePayload回调
* @param data 需要解包的数据,可能是不完整的包或多个包
* @param len 数据长度
*/
......@@ -77,7 +82,7 @@ public:
void encode(const WebSocketHeader &header,const Buffer::Ptr &buffer);
protected:
/**
* 收到一个webSocket数据包包头,后续将继续触发onWebSocketDecodePlayload回调
* 收到一个webSocket数据包包头,后续将继续触发onWebSocketDecodePayload回调
* @param header 数据包头
*/
virtual void onWebSocketDecodeHeader(const WebSocketHeader &header) {};
......@@ -87,9 +92,9 @@ protected:
* @param header 数据包包头
* @param ptr 负载数据指针
* @param len 负载数据长度
* @param recved 已接收数据长度(包含本次数据长度),等于header._playload_len时则接受完毕
* @param recved 已接收数据长度(包含本次数据长度),等于header._payload_len时则接受完毕
*/
virtual void onWebSocketDecodePlayload(const WebSocketHeader &header, const uint8_t *ptr, uint64_t len, uint64_t recved) {};
virtual void onWebSocketDecodePayload(const WebSocketHeader &header, const uint8_t *ptr, uint64_t len, uint64_t recved) {};
/**
......@@ -105,12 +110,12 @@ protected:
*/
virtual void onWebSocketEncodeData(const Buffer::Ptr &buffer){};
private:
void onPlayloadData(uint8_t *data,uint64_t len);
void onPayloadData(uint8_t *data, uint64_t len);
private:
string _remain_data;
int _mask_offset = 0;
bool _got_header = false;
uint64_t _playload_offset = 0;
uint64_t _payload_offset = 0;
};
} /* namespace mediakit */
......
......@@ -12,18 +12,31 @@
#include "PlayerBase.h"
#include "Rtsp/RtspPlayerImp.h"
#include "Rtmp/RtmpPlayerImp.h"
#include "Http/HlsPlayer.h"
using namespace toolkit;
namespace mediakit {
PlayerBase::Ptr PlayerBase::createPlayer(const EventPoller::Ptr &poller,const string &strUrl) {
//returns true when `str` ends with `substr` (an empty suffix always matches)
static bool end_of(const string &str, const string &substr){
    //a suffix longer than the string itself can never match
    if (substr.size() > str.size()) {
        return false;
    }
    //compare the tail of str against substr
    return str.compare(str.size() - substr.size(), substr.size(), substr) == 0;
}
PlayerBase::Ptr PlayerBase::createPlayer(const EventPoller::Ptr &poller,const string &url_in) {
static auto releasePlayer = [](PlayerBase *ptr){
onceToken token(nullptr,[&](){
delete ptr;
});
ptr->teardown();
};
string prefix = FindField(strUrl.data(), NULL, "://");
string url = url_in;
string prefix = FindField(url.data(), NULL, "://");
auto pos = url.find('?');
if (pos != string::npos) {
//去除?后面的字符串
url = url.substr(0, pos);
}
if (strcasecmp("rtsps",prefix.data()) == 0) {
return PlayerBase::Ptr(new TcpClientWithSSL<RtspPlayerImp>(poller),releasePlayer);
......@@ -41,6 +54,10 @@ PlayerBase::Ptr PlayerBase::createPlayer(const EventPoller::Ptr &poller,const st
return PlayerBase::Ptr(new RtmpPlayerImp(poller),releasePlayer);
}
if ((strcasecmp("http",prefix.data()) == 0 || strcasecmp("https",prefix.data()) == 0) && end_of(url, ".m3u8")) {
return PlayerBase::Ptr(new HlsPlayerImp(poller),releasePlayer);
}
return PlayerBase::Ptr(new RtspPlayerImp(poller),releasePlayer);
}
......
......@@ -13,8 +13,6 @@ namespace mediakit {
HlsMaker::HlsMaker(float seg_duration, uint32_t seg_number) {
//最小允许设置为0,0个切片代表点播
seg_number = MAX(0,seg_number);
seg_duration = MAX(1,seg_duration);
_seg_number = seg_number;
_seg_duration = seg_duration;
}
......@@ -34,6 +32,8 @@ void HlsMaker::makeIndexFile(bool eof) {
}
}
auto sequence = _seg_number ? (_file_index > _seg_number ? _file_index - _seg_number : 0LL) : 0LL;
string m3u8;
snprintf(file_content,sizeof(file_content),
"#EXTM3U\n"
......@@ -42,7 +42,7 @@ void HlsMaker::makeIndexFile(bool eof) {
"#EXT-X-TARGETDURATION:%u\n"
"#EXT-X-MEDIA-SEQUENCE:%llu\n",
(maxSegmentDuration + 999) / 1000,
_seg_number ? _file_index : 0);
sequence);
m3u8.assign(file_content);
......
/*
/*
* Copyright (c) 2016 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
......
/*
/*
* Copyright (c) 2016 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
......
/*
/*
* Copyright (c) 2016 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
......
/*
/*
* Copyright (c) 2016 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
......
......@@ -122,27 +122,68 @@ void MP4Muxer::inputFrame(const Frame::Ptr &frame) {
}
}
//map our CodecId to the corresponding MOV object id used by libmov;
//returns 0 for codecs mp4 recording does not support
static uint8_t getObject(CodecId codecId){
    switch (codecId){
        case CodecG711A : return MOV_OBJECT_G711a;
        case CodecG711U : return MOV_OBJECT_G711u;
        case CodecOpus : return MOV_OBJECT_OPUS;
        case CodecAAC : return MOV_OBJECT_AAC;
        case CodecH264 : return MOV_OBJECT_H264;
        case CodecH265 : return MOV_OBJECT_HEVC;
        default : return 0;
    }
}
//when both an audio and a video track exist, sync the audio timestamp to the
//video one (adjusting audio timestamps does not affect playback)
void MP4Muxer::stampSync(){
    if(_codec_to_trackid.size() < 2){
        //fewer than two tracks: nothing to synchronize
        return;
    }

    Stamp *audio = nullptr, *video = nullptr;
    //locate the audio and video stamp objects among the registered tracks
    for(auto &pr : _codec_to_trackid){
        switch (getTrackType((CodecId) pr.first)){
            case TrackAudio : audio = &pr.second.stamp; break;
            case TrackVideo : video = &pr.second.stamp; break;
            default : break;
        }
    }

    if(audio && video){
        //sync audio to video
        audio->syncTo(*video);
    }
}
void MP4Muxer::addTrack(const Track::Ptr &track) {
auto mp4_object = getObject(track->getCodecId());
if (!mp4_object) {
WarnL << "MP4录制不支持该编码格式:" << track->getCodecName();
return;
}
if (!track->ready()) {
WarnL << "Track[" << track->getCodecName() << "]未就绪";
return;
}
switch (track->getCodecId()) {
case CodecG711A:
case CodecG711U: {
auto audio_track = dynamic_pointer_cast<G711Track>(track);
case CodecG711U:
case CodecOpus: {
auto audio_track = dynamic_pointer_cast<AudioTrack>(track);
if (!audio_track) {
WarnL << "不是G711 Track";
return;
}
if (!audio_track->ready()) {
WarnL << "G711 Track未就绪";
WarnL << "不是音频Track:" << track->getCodecName();
return;
}
auto track_id = mov_writer_add_audio(_mov_writter.get(),
track->getCodecId() == CodecG711A ? MOV_OBJECT_G711a : MOV_OBJECT_G711u,
mp4_object,
audio_track->getAudioChannel(),
audio_track->getAudioSampleBit() * audio_track->getAudioChannel(),
audio_track->getAudioSampleRate(),
nullptr, 0);
if (track_id < 0) {
WarnL << "添加G711 Track失败:" << track_id;
WarnL << "添加Track[" << track->getCodecName() << "]失败:" << track_id;
return;
}
_codec_to_trackid[track->getCodecId()].track_id = track_id;
......@@ -155,16 +196,14 @@ void MP4Muxer::addTrack(const Track::Ptr &track) {
WarnL << "不是AAC Track";
return;
}
if(!audio_track->ready()){
WarnL << "AAC Track未就绪";
return;
}
auto track_id = mov_writer_add_audio(_mov_writter.get(),
MOV_OBJECT_AAC,
mp4_object,
audio_track->getAudioChannel(),
audio_track->getAudioSampleBit() * audio_track->getAudioChannel(),
audio_track->getAudioSampleRate(),
audio_track->getAacCfg().data(), 2);
audio_track->getAacCfg().data(),
audio_track->getAacCfg().size());
if(track_id < 0){
WarnL << "添加AAC Track失败:" << track_id;
return;
......@@ -178,10 +217,6 @@ void MP4Muxer::addTrack(const Track::Ptr &track) {
WarnL << "不是H264 Track";
return;
}
if(!h264_track->ready()){
WarnL << "H264 Track未就绪";
return;
}
struct mpeg4_avc_t avc = {0};
string sps_pps = string("\x00\x00\x00\x01", 4) + h264_track->getSps() +
......@@ -196,7 +231,7 @@ void MP4Muxer::addTrack(const Track::Ptr &track) {
}
auto track_id = mov_writer_add_video(_mov_writter.get(),
MOV_OBJECT_H264,
mp4_object,
h264_track->getVideoWidth(),
h264_track->getVideoHeight(),
extra_data,
......@@ -216,10 +251,6 @@ void MP4Muxer::addTrack(const Track::Ptr &track) {
WarnL << "不是H265 Track";
return;
}
if(!h265_track->ready()){
WarnL << "H265 Track未就绪";
return;
}
struct mpeg4_hevc_t hevc = {0};
string vps_sps_pps = string("\x00\x00\x00\x01", 4) + h265_track->getVps() +
......@@ -235,7 +266,7 @@ void MP4Muxer::addTrack(const Track::Ptr &track) {
}
auto track_id = mov_writer_add_video(_mov_writter.get(),
MOV_OBJECT_HEVC,
mp4_object,
h265_track->getVideoWidth(),
h265_track->getVideoHeight(),
extra_data,
......@@ -248,10 +279,12 @@ void MP4Muxer::addTrack(const Track::Ptr &track) {
_have_video = true;
}
break;
default:
WarnL << "MP4录制不支持该编码格式:" << track->getCodecName();
break;
default: WarnL << "MP4录制不支持该编码格式:" << track->getCodecName(); break;
}
//尝试音视频同步
stampSync();
}
}//namespace mediakit
......
......@@ -45,13 +45,14 @@ public:
private:
void openMP4();
void closeMP4();
void stampSync();
private:
struct track_info{
struct track_info {
int track_id = -1;
Stamp stamp;
};
unordered_map<int,track_info> _codec_to_trackid;
unordered_map<int, track_info> _codec_to_trackid;
List<Frame::Ptr> _frameCached;
bool _started = false;
bool _have_video = false;
......
......@@ -80,11 +80,11 @@ std::shared_ptr<MediaSinkInterface> Recorder::createRecorder(type type, const st
}
static MediaSource::Ptr getMediaSource(const string &vhost, const string &app, const string &stream_id){
auto src = MediaSource::find(RTMP_SCHEMA, vhost, app, stream_id, false);
auto src = MediaSource::find(RTMP_SCHEMA, vhost, app, stream_id);
if(src){
return src;
}
return MediaSource::find(RTSP_SCHEMA, vhost, app, stream_id, false);
return MediaSource::find(RTSP_SCHEMA, vhost, app, stream_id);
}
bool Recorder::isRecording(type type, const string &vhost, const string &app, const string &stream_id){
......
......@@ -23,6 +23,26 @@ TsMuxer::~TsMuxer() {
uninit();
}
//when both an audio and a video track exist, sync the audio timestamp to the
//video one (adjusting audio timestamps does not affect playback)
void TsMuxer::stampSync(){
    if(_codec_to_trackid.size() < 2){
        //fewer than two tracks: nothing to synchronize
        return;
    }

    Stamp *audio = nullptr, *video = nullptr;
    //locate the audio and video stamp objects among the registered tracks
    for(auto &pr : _codec_to_trackid){
        switch (getTrackType((CodecId) pr.first)){
            case TrackAudio : audio = &pr.second.stamp; break;
            case TrackVideo : video = &pr.second.stamp; break;
            default : break;
        }
    }

    if(audio && video){
        //sync audio to video
        audio->syncTo(*video);
    }
}
void TsMuxer::addTrack(const Track::Ptr &track) {
switch (track->getCodecId()) {
case CodecH264: {
......@@ -52,9 +72,11 @@ void TsMuxer::addTrack(const Track::Ptr &track) {
break;
}
default:
break;
default: WarnL << "mpeg-ts 不支持该编码格式,已忽略:" << track->getCodecName(); break;
}
//尝试音视频同步
stampSync();
}
void TsMuxer::inputFrame(const Frame::Ptr &frame) {
......
......@@ -17,33 +17,55 @@
#include "Util/File.h"
#include "Common/MediaSink.h"
#include "Common/Stamp.h"
using namespace toolkit;
namespace mediakit {
//该类用于产生MPEG-TS
class TsMuxer : public MediaSinkInterface {
public:
TsMuxer();
virtual ~TsMuxer();
/**
* 添加音视频轨道
*/
void addTrack(const Track::Ptr &track) override;
/**
* 重置音视频轨道
*/
void resetTracks() override;
/**
* 输入帧数据
*/
void inputFrame(const Frame::Ptr &frame) override;
protected:
/**
* 输出mpegts数据回调
* @param packet mpegts数据
* @param bytes mpegts数据长度
* @param timestamp 时间戳,单位毫秒
* @param is_idr_fast_packet 是否为关键帧的第一个TS包,用于确保ts切片第一帧为关键帧
*/
virtual void onTs(const void *packet, int bytes,uint32_t timestamp,bool is_idr_fast_packet) = 0;
private:
void init();
void uninit();
//音视频时间戳同步用
void stampSync();
private:
void *_context = nullptr;
char *_tsbuf[188];
char _tsbuf[188];
uint32_t _timestamp = 0;
struct track_info{
struct track_info {
int track_id = -1;
Stamp stamp;
};
unordered_map<int,track_info> _codec_to_trackid;
unordered_map<int, track_info> _codec_to_trackid;
List<Frame::Ptr> _frameCached;
bool _is_idr_fast_packet = false;
bool _have_video = false;
......
......@@ -50,6 +50,9 @@ void FlvMuxer::start(const EventPoller::Ptr &poller,const RtmpMediaSource::Ptr &
}
strongSelf->onDetach();
});
//音频同步于视频
_stamp[0].syncTo(_stamp[1]);
_ring_reader->setReadCB([weakSelf](const RtmpMediaSource::RingDataType &pkt){
auto strongSelf = weakSelf.lock();
if(!strongSelf){
......@@ -164,7 +167,7 @@ void FlvMuxer::stop() {
///////////////////////////////////////////////////////FlvRecorder/////////////////////////////////////////////////////
void FlvRecorder::startRecord(const EventPoller::Ptr &poller,const string &vhost, const string &app, const string &stream,const string &file_path) {
startRecord(poller,dynamic_pointer_cast<RtmpMediaSource>(MediaSource::find(RTMP_SCHEMA,vhost,app,stream,false)),file_path);
startRecord(poller,dynamic_pointer_cast<RtmpMediaSource>(MediaSource::find(RTMP_SCHEMA,vhost,app,stream)),file_path);
}
void FlvRecorder::startRecord(const EventPoller::Ptr &poller,const RtmpMediaSource::Ptr &media, const string &file_path) {
......
......@@ -100,4 +100,23 @@ uint8_t getAudioRtmpFlags(const Track::Ptr &track){
}
//generate the rtmp metadata for a single track and merge its keys into
//the given amf metadata object; non audio/video tracks are ignored
void Metadata::addTrack(AMFValue &metadata, const Track::Ptr &track) {
    Metadata::Ptr new_metadata;
    switch (track->getTrackType()) {
        case TrackVideo: {
            new_metadata = std::make_shared<VideoMeta>(dynamic_pointer_cast<VideoTrack>(track));
        }
            break;
        case TrackAudio: {
            new_metadata = std::make_shared<AudioMeta>(dynamic_pointer_cast<AudioTrack>(track));
        }
            break;
        default:
            //neither audio nor video: nothing to add
            return;
    }

    //copy every key/value pair of the generated metadata into the target object
    new_metadata->getMetadata().object_for_each([&](const std::string &key, const AMFValue &value) {
        metadata.set(key, value);
    });
}
}//namespace mediakit
\ No newline at end of file
......@@ -220,6 +220,8 @@ public:
const AMFValue &getMetadata() const{
return _metadata;
}
static void addTrack(AMFValue &metadata, const Track::Ptr &track);
protected:
AMFValue _metadata;
};
......@@ -242,18 +244,6 @@ public:
}
}
/**
* 返回音频或视频类型
* @return
*/
TrackType getTrackType() const override {
return TrackTitle;
}
/**
* 返回编码器id
* @return
*/
CodecId getCodecId() const override{
return CodecInvalid;
}
......@@ -266,18 +256,6 @@ public:
VideoMeta(const VideoTrack::Ptr &video,int datarate = 5000);
virtual ~VideoMeta(){}
/**
* 返回音频或视频类型
* @return
*/
TrackType getTrackType() const override {
return TrackVideo;
}
/**
* 返回编码器id
* @return
*/
CodecId getCodecId() const override{
return _codecId;
}
......@@ -285,7 +263,6 @@ private:
CodecId _codecId;
};
class AudioMeta : public Metadata{
public:
typedef std::shared_ptr<AudioMeta> Ptr;
......@@ -294,18 +271,6 @@ public:
virtual ~AudioMeta(){}
/**
* 返回音频或视频类型
* @return
*/
TrackType getTrackType() const override {
return TrackAudio;
}
/**
* 返回编码器id
* @return
*/
CodecId getCodecId() const override{
return _codecId;
}
......@@ -317,7 +282,4 @@ private:
uint8_t getAudioRtmpFlags(const Track::Ptr &track);
}//namespace mediakit
#endif
#endif//__rtmp_h
......@@ -13,60 +13,56 @@
namespace mediakit {
void RtmpDemuxer::loadMetaData(const AMFValue &val){
bool RtmpDemuxer::loadMetaData(const AMFValue &val){
bool ret = false;
try {
int audiosamplerate = 0;
int audiochannels = 0;
int audiosamplesize = 0;
const AMFValue *audiocodecid = nullptr;
const AMFValue *videocodecid = nullptr;
val.object_for_each([&](const string &key, const AMFValue &val) {
if (key == "duration") {
_fDuration = val.as_number();
return;
}
if(key == "audiosamplerate"){
if (key == "audiosamplerate") {
audiosamplerate = val.as_integer();
return;
}
if(key == "audiosamplesize"){
if (key == "audiosamplesize") {
audiosamplesize = val.as_integer();
return;
}
if(key == "stereo"){
if (key == "stereo") {
audiochannels = val.as_boolean() ? 2 : 1;
return;
}
if(key == "videocodecid"){
if (key == "videocodecid") {
//找到视频
videocodecid = &val;
return;
}
if(key == "audiocodecid"){
if (key == "audiocodecid") {
//找到音频
audiocodecid = &val;
return;
}
});
if(videocodecid){
if (videocodecid) {
//有视频
ret = true;
makeVideoTrack(*videocodecid);
}
if(audiocodecid){
if (audiocodecid) {
//有音频
ret = true;
makeAudioTrack(*audiocodecid, audiosamplerate, audiochannels, audiosamplesize);
}
}catch (std::exception &ex){
} catch (std::exception &ex) {
WarnL << ex.what();
}
return ret;
}
bool RtmpDemuxer::inputRtmp(const RtmpPacket::Ptr &pkt) {
......@@ -105,12 +101,11 @@ void RtmpDemuxer::makeVideoTrack(const AMFValue &videoCodec) {
_videoTrack = dynamic_pointer_cast<VideoTrack>(Factory::getVideoTrackByAmf(videoCodec));
if (_videoTrack) {
//生成rtmpCodec对象以便解码rtmp
_videoRtmpDecoder = Factory::getRtmpCodecByTrack(_videoTrack);
_videoRtmpDecoder = Factory::getRtmpCodecByTrack(_videoTrack, false);
if (_videoRtmpDecoder) {
//设置rtmp解码器代理,生成的frame写入该Track
_videoRtmpDecoder->addDelegate(_videoTrack);
onAddTrack(_videoTrack);
_tryedGetVideoTrack = true;
} else {
//找不到相应的rtmp解码器,该track无效
_videoTrack.reset();
......@@ -123,12 +118,11 @@ void RtmpDemuxer::makeAudioTrack(const AMFValue &audioCodec,int sample_rate, int
_audioTrack = dynamic_pointer_cast<AudioTrack>(Factory::getAudioTrackByAmf(audioCodec, sample_rate, channels, sample_bit));
if (_audioTrack) {
//生成rtmpCodec对象以便解码rtmp
_audioRtmpDecoder = Factory::getRtmpCodecByTrack(_audioTrack);
_audioRtmpDecoder = Factory::getRtmpCodecByTrack(_audioTrack, false);
if (_audioRtmpDecoder) {
//设置rtmp解码器代理,生成的frame写入该Track
_audioRtmpDecoder->addDelegate(_audioTrack);
onAddTrack(_audioTrack);
_tryedGetAudioTrack = true;
} else {
//找不到相应的rtmp解码器,该track无效
_audioTrack.reset();
......
......@@ -30,7 +30,7 @@ public:
RtmpDemuxer() = default;
virtual ~RtmpDemuxer() = default;
void loadMetaData(const AMFValue &metadata);
bool loadMetaData(const AMFValue &metadata);
/**
* 开始解复用
......
......@@ -33,9 +33,6 @@ using namespace toolkit;
#define RTMP_GOP_SIZE 512
namespace mediakit {
typedef VideoPacketCache<RtmpPacket> RtmpVideoCache;
typedef AudioPacketCache<RtmpPacket> RtmpAudioCache;
/**
* rtmp媒体源的数据抽象
* rtmp有关键的三要素,分别是metadata、config帧,普通帧
......@@ -43,7 +40,7 @@ typedef AudioPacketCache<RtmpPacket> RtmpAudioCache;
* 只要生成了这三要素,那么要实现rtmp推流、rtmp服务器就很简单了
* rtmp推拉流协议中,先传递metadata,然后传递config帧,然后一直传递普通帧
*/
class RtmpMediaSource : public MediaSource, public RingDelegate<RtmpPacket::Ptr>, public RtmpVideoCache, public RtmpAudioCache{
class RtmpMediaSource : public MediaSource, public RingDelegate<RtmpPacket::Ptr>, public PacketCache<RtmpPacket>{
public:
typedef std::shared_ptr<RtmpMediaSource> Ptr;
typedef std::shared_ptr<List<RtmpPacket::Ptr> > RingDataType;
......@@ -111,6 +108,14 @@ public:
}
/**
* 更新metadata
*/
/**
 * Replace the cached rtmp metadata (thread safe via _mtx).
 */
void updateMetaData(const AMFValue &metadata) {
    lock_guard<recursive_mutex> lock(_mtx);
    _metadata = metadata;
}
/**
* 输入rtmp包
* @param pkt rtmp包
* @param key 是否为关键帧
......@@ -149,12 +154,7 @@ public:
regist();
}
}
if(pkt->typeId == MSG_VIDEO){
RtmpVideoCache::inputVideo(pkt, key);
}else{
RtmpAudioCache::inputAudio(pkt);
}
PacketCache<RtmpPacket>::inputPacket(pkt->typeId == MSG_VIDEO, pkt, key);
}
/**
......@@ -175,21 +175,13 @@ public:
private:
/**
* 批量flush时间戳相同的视频rtmp包时触发该函数
* @param rtmp_list 时间戳相同的rtmp包列表
* @param key_pos 是否包含关键帧
*/
void onFlushVideo(std::shared_ptr<List<RtmpPacket::Ptr> > &rtmp_list, bool key_pos) override {
_ring->write(rtmp_list, key_pos);
}
/**
* 批量flush一定数量的音频rtmp包时触发该函数
* 批量flush rtmp包时触发该函数
* @param rtmp_list rtmp包列表
* @param key_pos 是否包含关键帧
*/
void onFlushAudio(std::shared_ptr<List<RtmpPacket::Ptr> > &rtmp_list) override{
//只有音频的话,就不存在gop缓存的意义
_ring->write(rtmp_list, !_have_video);
/**
 * Triggered when a batch of rtmp packets is flushed to the ring buffer.
 * @param rtmp_list batch of rtmp packets sharing the flush
 * @param key_pos whether the batch contains a key frame
 */
void onFlush(std::shared_ptr<List<RtmpPacket::Ptr> > &rtmp_list, bool key_pos) override {
    //without video a gop cache is pointless, so force is_key to true to keep the gop cache flushed
    _ring->write(rtmp_list, _have_video ? key_pos : true);
}
/**
......
......@@ -49,7 +49,11 @@ public:
* 设置metadata
*/
void setMetaData(const AMFValue &metadata) override{
_demuxer->loadMetaData(metadata);
if(!_demuxer->loadMetaData(metadata)){
//该metadata无效,需要重新生成
_metadata = metadata;
_recreate_metadata = true;
}
RtmpMediaSource::setMetaData(metadata);
}
......@@ -146,11 +150,22 @@ public:
//all tracks are ready: publish the muxer as the track source and, if the
//original metadata was unusable, regenerate it from the now-ready tracks
void onAllTrackReady() override{
    setTrackSource(_muxer);
    _all_track_ready = true;

    if (_recreate_metadata) {
        //rebuild metadata from each ready track and push the update
        for (auto &track : _muxer->getTracks()) {
            Metadata::addTrack(_metadata, track);
        }
        RtmpMediaSource::updateMetaData(_metadata);
    }
}
private:
RtmpDemuxer::Ptr _demuxer;
MultiMediaSourceMuxer::Ptr _muxer;
AMFValue _metadata;
bool _all_track_ready = false;
bool _recreate_metadata = false;
};
} /* namespace mediakit */
......
......@@ -23,47 +23,9 @@ RtmpMuxer::RtmpMuxer(const TitleMeta::Ptr &title) {
}
void RtmpMuxer::addTrack(const Track::Ptr &track) {
//根据track生产metadata
Metadata::Ptr metadata;
switch (track->getTrackType()){
case TrackVideo:{
metadata = std::make_shared<VideoMeta>(dynamic_pointer_cast<VideoTrack>(track));
}
break;
case TrackAudio:{
metadata = std::make_shared<AudioMeta>(dynamic_pointer_cast<AudioTrack>(track));
}
break;
default:
return;
}
switch (track->getCodecId()){
case CodecG711A:
case CodecG711U:{
auto audio_track = dynamic_pointer_cast<AudioTrack>(track);
if(!audio_track){
return;
}
if (audio_track->getAudioSampleRate() != 8000 ||
audio_track->getAudioChannel() != 1 ||
audio_track->getAudioSampleBit() != 16) {
WarnL << "RTMP只支持8000/1/16规格的G711,目前规格是:"
<< audio_track->getAudioSampleRate() << "/"
<< audio_track->getAudioChannel() << "/"
<< audio_track->getAudioSampleBit()
<< ",该音频已被忽略";
return;
}
break;
}
default : break;
}
auto &encoder = _encoder[track->getTrackType()];
//生成rtmp编码器,克隆该Track,防止循环引用
encoder = Factory::getRtmpCodecByTrack(track->clone());
encoder = Factory::getRtmpCodecByTrack(track->clone(), true);
if (!encoder) {
return;
}
......@@ -71,10 +33,8 @@ void RtmpMuxer::addTrack(const Track::Ptr &track) {
//设置rtmp输出环形缓存
encoder->setRtmpRing(_rtmpRing);
//添加其metadata
metadata->getMetadata().object_for_each([&](const std::string &key, const AMFValue &value){
_metadata.set(key,value);
});
//添加metadata
Metadata::addTrack(_metadata,track);
}
void RtmpMuxer::inputFrame(const Frame::Ptr &frame) {
......
......@@ -130,8 +130,7 @@ void RtmpSession::onCmd_publish(AMFDecoder &dec) {
auto src = dynamic_pointer_cast<RtmpMediaSource>(MediaSource::find(RTMP_SCHEMA,
_mediaInfo._vhost,
_mediaInfo._app,
_mediaInfo._streamid,
false));
_mediaInfo._streamid));
bool authSuccess = err.empty();
bool ok = (!src && !_pPublisherSrc && authSuccess);
AMFValue status(AMF_OBJECT);
......@@ -158,6 +157,12 @@ void RtmpSession::onCmd_publish(AMFDecoder &dec) {
setSocketFlags();
};
if(_mediaInfo._app.empty() || _mediaInfo._streamid.empty()){
//不允许莫名其妙的推流url
onRes("rtmp推流url非法", false, false, false);
return;
}
Broadcast::PublishAuthInvoker invoker = [weakSelf,onRes,pToken](const string &err,bool enableRtxp,bool enableHls,bool enableMP4){
auto strongSelf = weakSelf.lock();
if(!strongSelf){
......@@ -266,6 +271,8 @@ void RtmpSession::sendPlayResponse(const string &err,const RtmpMediaSource::Ptr
onSendMedia(pkt);
});
//音频同步于视频
_stamp[0].syncTo(_stamp[1]);
_pRingReader = src->getRing()->attach(getPoller());
weak_ptr<RtmpSession> weakSelf = dynamic_pointer_cast<RtmpSession>(shared_from_this());
_pRingReader->setReadCB([weakSelf](const RtmpMediaSource::RingDataType &pkt) {
......
......@@ -44,6 +44,7 @@ inline void AMFValue::destroy() {
break;
}
}
inline void AMFValue::init() {
switch (_type) {
case AMF_OBJECT:
......@@ -60,14 +61,13 @@ inline void AMFValue::init() {
default:
break;
}
}
//construct an amf value of the given type; init() allocates the
//type-specific storage for the union member
AMFValue::AMFValue(AMFType type) :
        _type(type) {
    init();
}
//release any heap storage owned by the current type
AMFValue::~AMFValue() {
    destroy();
}
......@@ -78,7 +78,6 @@ AMFValue::AMFValue(const char *s) :
*_value.string = s;
}
AMFValue::AMFValue(const std::string &s) :
_type(AMF_STRING) {
init();
......@@ -108,15 +107,7 @@ AMFValue::AMFValue(const AMFValue &from) :
*this = from;
}
AMFValue::AMFValue(AMFValue &&from) {
*this = std::forward<AMFValue>(from);
}
AMFValue& AMFValue::operator =(const AMFValue &from) {
return *this = const_cast<AMFValue &&>(from);
}
AMFValue& AMFValue::operator =(AMFValue &&from) {
AMFValue& AMFValue::operator = (const AMFValue &from) {
destroy();
_type = from._type;
init();
......@@ -144,7 +135,6 @@ AMFValue& AMFValue::operator =(AMFValue &&from) {
break;
}
return *this;
}
void AMFValue::clear() {
......@@ -236,7 +226,6 @@ string AMFValue::to_string() const{
}
}
const AMFValue& AMFValue::operator[](const char *str) const {
if (_type != AMF_OBJECT && _type != AMF_ECMA_ARRAY) {
throw std::runtime_error("AMF not a object");
......@@ -338,6 +327,7 @@ AMFEncoder & AMFEncoder::operator <<(const char *s) {
}
return *this;
}
AMFEncoder & AMFEncoder::operator <<(const std::string &s) {
if (!s.empty()) {
buf += char(AMF0_STRING);
......@@ -349,18 +339,22 @@ AMFEncoder & AMFEncoder::operator <<(const std::string &s) {
}
return *this;
}
//encode an amf0 NULL marker
AMFEncoder & AMFEncoder::operator <<(std::nullptr_t) {
    buf += char(AMF0_NULL);
    return *this;
}
//encode an amf0 UNDEFINED marker
AMFEncoder & AMFEncoder::write_undefined() {
    buf += char(AMF0_UNDEFINED);
    return *this;
}
//amf0 has no integer type: integers are encoded as doubles
AMFEncoder & AMFEncoder::operator <<(const int n){
    return (*this) << (double)n;
}
AMFEncoder & AMFEncoder::operator <<(const double n) {
buf += char(AMF0_NUMBER);
uint64_t encoded = 0;
......
......@@ -40,6 +40,7 @@ public:
typedef std::map<std::string, AMFValue> mapType;
typedef std::vector<AMFValue> arrayType;
~AMFValue();
AMFValue(AMFType type = AMF_NULL);
AMFValue(const char *s);
AMFValue(const std::string &s);
......@@ -47,10 +48,7 @@ public:
AMFValue(int i);
AMFValue(bool b);
AMFValue(const AMFValue &from);
AMFValue(AMFValue &&from);
AMFValue &operator =(const AMFValue &from);
AMFValue &operator =(AMFValue &&from);
~AMFValue();
AMFValue &operator = (const AMFValue &from);
void clear();
AMFType type() const ;
......
......@@ -8,18 +8,211 @@
* may be found in the AUTHORS file in the root of the source tree.
*/
#if defined(ENABLE_RTPPROXY)
#include "Decoder.h"
#include "PSDecoder.h"
#include "TSDecoder.h"
#include "Extension/H264.h"
#include "Extension/H265.h"
#include "Extension/AAC.h"
#include "Extension/G711.h"
#if defined(ENABLE_RTPPROXY) || defined(ENABLE_HLS)
#include "mpeg-ts-proto.h"
#endif
namespace mediakit {
Decoder::Ptr Decoder::createDecoder(Decoder::Type type) {
static Decoder::Ptr createDecoder_l(DecoderImp::Type type) {
switch (type){
case decoder_ps : return std::make_shared<PSDecoder>();
case decoder_ts : return std::make_shared<TSDecoder>();
default : return nullptr;
case DecoderImp::decoder_ps:
#ifdef ENABLE_RTPPROXY
return std::make_shared<PSDecoder>();
#else
WarnL << "创建ps解复用器失败,请打开ENABLE_RTPPROXY然后重新编译";
return nullptr;
#endif//ENABLE_RTPPROXY
case DecoderImp::decoder_ts:
#ifdef ENABLE_HLS
return std::make_shared<TSDecoder>();
#else
WarnL << "创建mpegts解复用器失败,请打开ENABLE_HLS然后重新编译";
return nullptr;
#endif//ENABLE_HLS
default: return nullptr;
}
}
/////////////////////////////////////////////////////////////
DecoderImp::Ptr DecoderImp::createDecoder(Type type, MediaSinkInterface *sink){
auto decoder = createDecoder_l(type);
if(!decoder){
return nullptr;
}
return DecoderImp::Ptr(new DecoderImp(decoder, sink));
}
int DecoderImp::input(const uint8_t *data, int bytes){
return _decoder->input(data, bytes);
}
DecoderImp::DecoderImp(const Decoder::Ptr &decoder, MediaSinkInterface *sink){
_decoder = decoder;
_sink = sink;
_decoder->setOnDecode([this](int stream,int codecid,int flags,int64_t pts,int64_t dts,const void *data,int bytes){
onDecode(stream,codecid,flags,pts,dts,data,bytes);
});
}
#if defined(ENABLE_RTPPROXY) || defined(ENABLE_HLS)
#define SWITCH_CASE(codec_id) case codec_id : return #codec_id
static const char *getCodecName(int codec_id) {
switch (codec_id) {
SWITCH_CASE(PSI_STREAM_MPEG1);
SWITCH_CASE(PSI_STREAM_MPEG2);
SWITCH_CASE(PSI_STREAM_AUDIO_MPEG1);
SWITCH_CASE(PSI_STREAM_MP3);
SWITCH_CASE(PSI_STREAM_AAC);
SWITCH_CASE(PSI_STREAM_MPEG4);
SWITCH_CASE(PSI_STREAM_MPEG4_AAC_LATM);
SWITCH_CASE(PSI_STREAM_H264);
SWITCH_CASE(PSI_STREAM_MPEG4_AAC);
SWITCH_CASE(PSI_STREAM_H265);
SWITCH_CASE(PSI_STREAM_AUDIO_AC3);
SWITCH_CASE(PSI_STREAM_AUDIO_EAC3);
SWITCH_CASE(PSI_STREAM_AUDIO_DTS);
SWITCH_CASE(PSI_STREAM_VIDEO_DIRAC);
SWITCH_CASE(PSI_STREAM_VIDEO_VC1);
SWITCH_CASE(PSI_STREAM_VIDEO_SVAC);
SWITCH_CASE(PSI_STREAM_AUDIO_SVAC);
SWITCH_CASE(PSI_STREAM_AUDIO_G711A);
SWITCH_CASE(PSI_STREAM_AUDIO_G711U);
SWITCH_CASE(PSI_STREAM_AUDIO_G722);
SWITCH_CASE(PSI_STREAM_AUDIO_G723);
SWITCH_CASE(PSI_STREAM_AUDIO_G729);
default : return "unknown codec";
}
}
void FrameMerger::inputFrame(const Frame::Ptr &frame,const function<void(uint32_t dts,uint32_t pts,const Buffer::Ptr &buffer)> &cb){
if (!_frameCached.empty() && _frameCached.back()->dts() != frame->dts()) {
Frame::Ptr back = _frameCached.back();
Buffer::Ptr merged_frame = back;
if(_frameCached.size() != 1){
string merged;
_frameCached.for_each([&](const Frame::Ptr &frame){
merged.append(frame->data(),frame->size());
});
merged_frame = std::make_shared<BufferString>(std::move(merged));
}
cb(back->dts(),back->pts(),merged_frame);
_frameCached.clear();
}
_frameCached.emplace_back(Frame::getCacheAbleFrame(frame));
}
void DecoderImp::onDecode(int stream,int codecid,int flags,int64_t pts,int64_t dts,const void *data,int bytes) {
pts /= 90;
dts /= 90;
switch (codecid) {
case PSI_STREAM_H264: {
if (!_codecid_video) {
//获取到视频
_codecid_video = codecid;
InfoL<< "got video track: H264";
auto track = std::make_shared<H264Track>();
onTrack(track);
}
if (codecid != _codecid_video) {
WarnL<< "video track change to H264 from codecid:" << getCodecName(_codecid_video);
return;
}
auto frame = std::make_shared<H264FrameNoCacheAble>((char *) data, bytes, dts, pts,0);
_merger.inputFrame(frame,[this](uint32_t dts, uint32_t pts, const Buffer::Ptr &buffer) {
onFrame(std::make_shared<H264FrameNoCacheAble>(buffer->data(), buffer->size(), dts, pts, prefixSize(buffer->data(), buffer->size())));
});
break;
}
case PSI_STREAM_H265: {
if (!_codecid_video) {
//获取到视频
_codecid_video = codecid;
InfoL<< "got video track: H265";
auto track = std::make_shared<H265Track>();
onTrack(track);
}
if (codecid != _codecid_video) {
WarnL<< "video track change to H265 from codecid:" << getCodecName(_codecid_video);
return;
}
auto frame = std::make_shared<H265FrameNoCacheAble>((char *) data, bytes, dts, pts, 0);
_merger.inputFrame(frame,[this](uint32_t dts, uint32_t pts, const Buffer::Ptr &buffer) {
onFrame(std::make_shared<H265FrameNoCacheAble>(buffer->data(), buffer->size(), dts, pts, prefixSize(buffer->data(), buffer->size())));
});
break;
}
case PSI_STREAM_AAC: {
if (!_codecid_audio) {
//获取到音频
_codecid_audio = codecid;
InfoL<< "got audio track: AAC";
auto track = std::make_shared<AACTrack>();
onTrack(track);
}
if (codecid != _codecid_audio) {
WarnL<< "audio track change to AAC from codecid:" << getCodecName(_codecid_audio);
return;
}
onFrame(std::make_shared<AACFrameNoCacheAble>((char *) data, bytes, dts, 0, 7));
break;
}
case PSI_STREAM_AUDIO_G711A:
case PSI_STREAM_AUDIO_G711U: {
auto codec = codecid == PSI_STREAM_AUDIO_G711A ? CodecG711A : CodecG711U;
if (!_codecid_audio) {
//获取到音频
_codecid_audio = codecid;
InfoL<< "got audio track: G711";
//G711传统只支持 8000/1/16的规格,FFmpeg貌似做了扩展,但是这里不管它了
auto track = std::make_shared<G711Track>(codec, 8000, 1, 16);
onTrack(track);
}
if (codecid != _codecid_audio) {
WarnL<< "audio track change to G711 from codecid:" << getCodecName(_codecid_audio);
return;
}
auto frame = std::make_shared<G711FrameNoCacheAble>((char *) data, bytes, dts);
frame->setCodec(codec);
onFrame(frame);
break;
}
default:
if(codecid != 0){
WarnL<< "unsupported codec type:" << getCodecName(codecid) << " " << (int)codecid;
}
break;
}
}
#else
void DecoderImp::onDecode(int stream,int codecid,int flags,int64_t pts,int64_t dts,const void *data,int bytes) {}
#endif
void DecoderImp::onTrack(const Track::Ptr &track) {
_sink->addTrack(track);
}
void DecoderImp::onFrame(const Frame::Ptr &frame) {
_sink->inputFrame(frame);
}
}//namespace mediakit
#endif//defined(ENABLE_RTPPROXY)
......@@ -11,31 +11,66 @@
#ifndef ZLMEDIAKIT_DECODER_H
#define ZLMEDIAKIT_DECODER_H
#if defined(ENABLE_RTPPROXY)
#include <stdint.h>
#include <memory>
#include <functional>
#include "Decoder.h"
#include "Common/MediaSink.h"
using namespace std;
namespace mediakit {
class Decoder {
public:
typedef std::shared_ptr<Decoder> Ptr;
typedef enum {
decoder_ts = 0,
decoder_ps
}Type;
typedef std::function<void(int stream,int codecid,int flags,int64_t pts,int64_t dts,const void *data,int bytes)> onDecode;
virtual int input(const uint8_t *data, int bytes) = 0;
virtual void setOnDecode(const onDecode &decode) = 0;
static Ptr createDecoder(Type type);
protected:
Decoder() = default;
virtual ~Decoder() = default;
};
/**
* 合并一些时间戳相同的frame
*/
class FrameMerger {
public:
FrameMerger() = default;
~FrameMerger() = default;
void inputFrame(const Frame::Ptr &frame,const function<void(uint32_t dts,uint32_t pts,const Buffer::Ptr &buffer)> &cb);
private:
List<Frame::Ptr> _frameCached;
};
class DecoderImp{
public:
typedef enum {
decoder_ts = 0,
decoder_ps
}Type;
typedef std::shared_ptr<DecoderImp> Ptr;
~DecoderImp() = default;
static Ptr createDecoder(Type type, MediaSinkInterface *sink);
int input(const uint8_t *data, int bytes);
protected:
void onTrack(const Track::Ptr &track);
void onFrame(const Frame::Ptr &frame);
private:
DecoderImp(const Decoder::Ptr &decoder, MediaSinkInterface *sink);
void onDecode(int stream,int codecid,int flags,int64_t pts,int64_t dts,const void *data,int bytes);
private:
Decoder::Ptr _decoder;
MediaSinkInterface *_sink;
FrameMerger _merger;
int _codecid_video = 0;
int _codecid_audio = 0;
};
}//namespace mediakit
#endif//defined(ENABLE_RTPPROXY)
#endif //ZLMEDIAKIT_DECODER_H
......@@ -16,8 +16,9 @@ using namespace toolkit;
namespace mediakit{
RtpDecoder::RtpDecoder() {
RtpDecoder::RtpDecoder(const char *codec) {
_buffer = std::make_shared<BufferRaw>();
_codec = codec;
}
RtpDecoder::~RtpDecoder() {
......@@ -46,7 +47,7 @@ void RtpDecoder::decodeRtp(const void *data, int bytes) {
uint8_t rtp_type = 0x7F & ((uint8_t *) data)[1];
InfoL << "rtp type:" << (int) rtp_type;
_rtp_decoder = rtp_payload_decode_create(rtp_type, "MP2P", &s_func, this);
_rtp_decoder = rtp_payload_decode_create(rtp_type, _codec.data(), &s_func, this);
if (!_rtp_decoder) {
WarnL << "unsupported rtp type:" << (int) rtp_type << ",size:" << bytes << ",hexdump" << hexdump(data, bytes > 16 ? 16 : bytes);
}
......
......@@ -19,14 +19,15 @@ namespace mediakit{
class RtpDecoder {
public:
RtpDecoder();
RtpDecoder(const char *codec = "MP2P");
virtual ~RtpDecoder();
protected:
void decodeRtp(const void *data, int bytes);
protected:
virtual void onRtpDecode(const uint8_t *packet, int bytes, uint32_t timestamp, int flags) = 0;
private:
void *_rtp_decoder = nullptr;
BufferRaw::Ptr _buffer;
string _codec;
};
}//namespace mediakit
......
......@@ -9,44 +9,13 @@
*/
#if defined(ENABLE_RTPPROXY)
#include "mpeg-ts-proto.h"
#include "RtpProcess.h"
#include "Util/File.h"
#include "Extension/H265.h"
#include "Extension/AAC.h"
#include "Extension/G711.h"
#include "Http/HttpTSPlayer.h"
#define RTP_APP_NAME "rtp"
namespace mediakit{
/**
* 合并一些时间戳相同的frame
*/
class FrameMerger {
public:
FrameMerger() = default;
virtual ~FrameMerger() = default;
void inputFrame(const Frame::Ptr &frame,const function<void(uint32_t dts,uint32_t pts,const Buffer::Ptr &buffer)> &cb){
if (!_frameCached.empty() && _frameCached.back()->dts() != frame->dts()) {
Frame::Ptr back = _frameCached.back();
Buffer::Ptr merged_frame = back;
if(_frameCached.size() != 1){
string merged;
_frameCached.for_each([&](const Frame::Ptr &frame){
merged.append(frame->data(),frame->size());
});
merged_frame = std::make_shared<BufferString>(std::move(merged));
}
cb(back->dts(),back->pts(),merged_frame);
_frameCached.clear();
}
_frameCached.emplace_back(Frame::getCacheAbleFrame(frame));
}
private:
List<Frame::Ptr> _frameCached;
};
string printSSRC(uint32_t ui32Ssrc) {
char tmp[9] = { 0 };
ui32Ssrc = htonl(ui32Ssrc);
......@@ -101,7 +70,6 @@ RtpProcess::RtpProcess(uint32_t ssrc) {
});
}
}
_merger = std::make_shared<FrameMerger>();
}
RtpProcess::~RtpProcess() {
......@@ -147,7 +115,6 @@ bool RtpProcess::inputRtp(const Socket::Ptr &sock, const char *data, int data_le
}
_total_bytes += data_len;
_last_rtp_time.resetTime();
bool ret = handleOneRtp(0,_track,(unsigned char *)data,data_len);
if(dts_out){
*dts_out = _dts;
......@@ -157,12 +124,12 @@ bool RtpProcess::inputRtp(const Socket::Ptr &sock, const char *data, int data_le
//判断是否为ts负载
static inline bool checkTS(const uint8_t *packet, int bytes){
return bytes % 188 == 0 && packet[0] == 0x47;
return bytes % TS_PACKET_SIZE == 0 && packet[0] == TS_SYNC_BYTE;
}
void RtpProcess::onRtpSorted(const RtpPacket::Ptr &rtp, int) {
if(rtp->sequence != _sequence + 1 && rtp->sequence != 0){
WarnP(this) << rtp->sequence << " != " << _sequence << "+1";
if(rtp->sequence != _sequence + 1 && _sequence != 0){
WarnP(this) << "rtp丢包:" << rtp->sequence << " != " << _sequence << "+1" << ",公网环境下请使用tcp方式推流";
}
_sequence = rtp->sequence;
if(_save_file_rtp){
......@@ -179,155 +146,38 @@ void RtpProcess::onRtpDecode(const uint8_t *packet, int bytes, uint32_t timestam
fwrite((uint8_t *)packet,bytes, 1, _save_file_ps.get());
}
if(!_decoder){
if (!_decoder) {
//创建解码器
if(checkTS(packet, bytes)){
if (checkTS(packet, bytes)) {
//猜测是ts负载
InfoP(this) << "judged to be TS";
_decoder = Decoder::createDecoder(Decoder::decoder_ts);
}else{
_decoder = DecoderImp::createDecoder(DecoderImp::decoder_ts, this);
} else {
//猜测是ps负载
InfoP(this) << "judged to be PS";
_decoder = Decoder::createDecoder(Decoder::decoder_ps);
_decoder = DecoderImp::createDecoder(DecoderImp::decoder_ps, this);
}
_decoder->setOnDecode([this](int stream,int codecid,int flags,int64_t pts,int64_t dts,const void *data,int bytes){
onDecode(stream,codecid,flags,pts,dts,data,bytes);
});
}
auto ret = _decoder->input((uint8_t *)packet,bytes);
if(ret != bytes){
if (_decoder) {
auto ret = _decoder->input((uint8_t *) packet, bytes);
if (ret != bytes) {
WarnP(this) << ret << " != " << bytes << " " << flags;
}
}
#define SWITCH_CASE(codec_id) case codec_id : return #codec_id
static const char *getCodecName(int codec_id) {
switch (codec_id) {
SWITCH_CASE(PSI_STREAM_MPEG1);
SWITCH_CASE(PSI_STREAM_MPEG2);
SWITCH_CASE(PSI_STREAM_AUDIO_MPEG1);
SWITCH_CASE(PSI_STREAM_MP3);
SWITCH_CASE(PSI_STREAM_AAC);
SWITCH_CASE(PSI_STREAM_MPEG4);
SWITCH_CASE(PSI_STREAM_MPEG4_AAC_LATM);
SWITCH_CASE(PSI_STREAM_H264);
SWITCH_CASE(PSI_STREAM_MPEG4_AAC);
SWITCH_CASE(PSI_STREAM_H265);
SWITCH_CASE(PSI_STREAM_AUDIO_AC3);
SWITCH_CASE(PSI_STREAM_AUDIO_EAC3);
SWITCH_CASE(PSI_STREAM_AUDIO_DTS);
SWITCH_CASE(PSI_STREAM_VIDEO_DIRAC);
SWITCH_CASE(PSI_STREAM_VIDEO_VC1);
SWITCH_CASE(PSI_STREAM_VIDEO_SVAC);
SWITCH_CASE(PSI_STREAM_AUDIO_SVAC);
SWITCH_CASE(PSI_STREAM_AUDIO_G711A);
SWITCH_CASE(PSI_STREAM_AUDIO_G711U);
SWITCH_CASE(PSI_STREAM_AUDIO_G722);
SWITCH_CASE(PSI_STREAM_AUDIO_G723);
SWITCH_CASE(PSI_STREAM_AUDIO_G729);
default : return "unknown codec";
}
}
void RtpProcess::onDecode(int stream,int codecid,int flags,int64_t pts,int64_t dts,const void *data,int bytes) {
pts /= 90;
dts /= 90;
_stamps[codecid].revise(dts,pts,dts,pts,false);
switch (codecid) {
case PSI_STREAM_H264: {
_dts = dts;
if (!_codecid_video) {
//获取到视频
_codecid_video = codecid;
InfoP(this) << "got video track: H264";
auto track = std::make_shared<H264Track>();
_muxer->addTrack(track);
}
if (codecid != _codecid_video) {
WarnP(this) << "video track change to H264 from codecid:" << getCodecName(_codecid_video);
return;
}
if(_save_file_video){
fwrite((uint8_t *)data,bytes, 1, _save_file_video.get());
}
auto frame = std::make_shared<H264FrameNoCacheAble>((char *) data, bytes, dts, pts,0);
_merger->inputFrame(frame,[this](uint32_t dts, uint32_t pts, const Buffer::Ptr &buffer) {
_muxer->inputFrame(std::make_shared<H264FrameNoCacheAble>(buffer->data(), buffer->size(), dts, pts,4));
});
break;
}
case PSI_STREAM_H265: {
_dts = dts;
if (!_codecid_video) {
//获取到视频
_codecid_video = codecid;
InfoP(this) << "got video track: H265";
auto track = std::make_shared<H265Track>();
_muxer->addTrack(track);
}
if (codecid != _codecid_video) {
WarnP(this) << "video track change to H265 from codecid:" << getCodecName(_codecid_video);
return;
}
if(_save_file_video){
fwrite((uint8_t *)data,bytes, 1, _save_file_video.get());
}
auto frame = std::make_shared<H265FrameNoCacheAble>((char *) data, bytes, dts, pts, 0);
_merger->inputFrame(frame,[this](uint32_t dts, uint32_t pts, const Buffer::Ptr &buffer) {
_muxer->inputFrame(std::make_shared<H265FrameNoCacheAble>(buffer->data(), buffer->size(), dts, pts, 4));
});
break;
}
case PSI_STREAM_AAC: {
_dts = dts;
if (!_codecid_audio) {
//获取到音频
_codecid_audio = codecid;
InfoP(this) << "got audio track: AAC";
auto track = std::make_shared<AACTrack>();
_muxer->addTrack(track);
}
if (codecid != _codecid_audio) {
WarnP(this) << "audio track change to AAC from codecid:" << getCodecName(_codecid_audio);
return;
}
_muxer->inputFrame(std::make_shared<AACFrameNoCacheAble>((char *) data, bytes, dts, 0, 7));
break;
void RtpProcess::inputFrame(const Frame::Ptr &frame){
_last_rtp_time.resetTime();
_dts = frame->dts();
if (_save_file_video && frame->getTrackType() == TrackVideo) {
fwrite((uint8_t *) frame->data(), frame->size(), 1, _save_file_video.get());
}
_muxer->inputFrame(frame);
}
case PSI_STREAM_AUDIO_G711A:
case PSI_STREAM_AUDIO_G711U: {
_dts = dts;
auto codec = codecid == PSI_STREAM_AUDIO_G711A ? CodecG711A : CodecG711U;
if (!_codecid_audio) {
//获取到音频
_codecid_audio = codecid;
InfoP(this) << "got audio track: G711";
//G711传统只支持 8000/1/16的规格,FFmpeg貌似做了扩展,但是这里不管它了
auto track = std::make_shared<G711Track>(codec, 8000, 1, 16);
void RtpProcess::addTrack(const Track::Ptr & track){
_muxer->addTrack(track);
}
if (codecid != _codecid_audio) {
WarnP(this) << "audio track change to G711 from codecid:" << getCodecName(_codecid_audio);
return;
}
_muxer->inputFrame(std::make_shared<G711FrameNoCacheAble>(codec, (char *) data, bytes, dts));
break;
}
default:
if(codecid != 0){
WarnP(this) << "unsupported codec type:" << getCodecName(codecid) << " " << (int)codecid;
}
return;
}
}
bool RtpProcess::alive() {
......@@ -412,6 +262,5 @@ void RtpProcess::emitOnPublish() {
}
}
}//namespace mediakit
#endif//defined(ENABLE_RTPPROXY)
\ No newline at end of file
......@@ -23,8 +23,7 @@ using namespace mediakit;
namespace mediakit{
string printSSRC(uint32_t ui32Ssrc);
class FrameMerger;
class RtpProcess : public RtpReceiver , public RtpDecoder, public SockInfo, public std::enable_shared_from_this<RtpProcess>{
class RtpProcess : public RtpReceiver , public RtpDecoder, public SockInfo, public MediaSinkInterface, public std::enable_shared_from_this<RtpProcess>{
public:
typedef std::shared_ptr<RtpProcess> Ptr;
RtpProcess(uint32_t ssrc);
......@@ -44,7 +43,9 @@ public:
protected:
void onRtpSorted(const RtpPacket::Ptr &rtp, int track_index) override ;
void onRtpDecode(const uint8_t *packet, int bytes, uint32_t timestamp, int flags) override;
void onDecode(int stream,int codecid,int flags,int64_t pts,int64_t dts, const void *data,int bytes);
void inputFrame(const Frame::Ptr &frame) override;
void addTrack(const Track::Ptr & track) override;
void resetTracks() override {};
private:
void emitOnPublish();
......@@ -57,14 +58,10 @@ private:
SdpTrack::Ptr _track;
struct sockaddr *_addr = nullptr;
uint16_t _sequence = 0;
int _codecid_video = 0;
int _codecid_audio = 0;
MultiMediaSourceMuxer::Ptr _muxer;
std::shared_ptr<FrameMerger> _merger;
Ticker _last_rtp_time;
unordered_map<int,Stamp> _stamps;
uint32_t _dts = 0;
Decoder::Ptr _decoder;
DecoderImp::Ptr _decoder;
std::weak_ptr<MediaSourceEvent> _listener;
MediaInfo _media_info;
uint64_t _total_bytes = 0;
......
......@@ -8,34 +8,38 @@
* may be found in the AUTHORS file in the root of the source tree.
*/
#if defined(ENABLE_RTPPROXY)
#include "mpeg-ts.h"
#include "TSDecoder.h"
#define TS_PACKET_SIZE 188
namespace mediakit {
bool TSSegment::isTSPacket(const char *data, int len){
return len == TS_PACKET_SIZE && ((uint8_t*)data)[0] == TS_SYNC_BYTE;
}
void TSSegment::setOnSegment(const TSSegment::onSegment &cb) {
_onSegment = cb;
}
int64_t TSSegment::onRecvHeader(const char *data, uint64_t len) {
if (!isTSPacket(data, len)) {
WarnL << "不是ts包:" << (int) (data[0]) << " " << len;
return 0;
}
_onSegment(data, len);
return 0;
}
const char *TSSegment::onSearchPacketTail(const char *data, int len) {
if (len < _size + 1) {
if (len == _size && ((uint8_t *) data)[0] == 0x47) {
if (len == _size && ((uint8_t *) data)[0] == TS_SYNC_BYTE) {
return data + _size;
}
return nullptr;
}
//下一个包头
if (((uint8_t *) data)[_size] == 0x47) {
if (((uint8_t *) data)[_size] == TS_SYNC_BYTE) {
return data + _size;
}
auto pos = memchr(data + _size, 0x47, len - _size);
auto pos = memchr(data + _size, TS_SYNC_BYTE, len - _size);
if (pos) {
return (char *) pos;
}
......@@ -44,12 +48,10 @@ const char *TSSegment::onSearchPacketTail(const char *data, int len) {
////////////////////////////////////////////////////////////////
TSDecoder::TSDecoder() : _ts_segment(TS_PACKET_SIZE) {
#if defined(ENABLE_HLS)
#include "mpeg-ts.h"
TSDecoder::TSDecoder() : _ts_segment() {
_ts_segment.setOnSegment([this](const char *data,uint64_t len){
if(((uint8_t*)data)[0] != 0x47 || len != TS_PACKET_SIZE ){
WarnL << "不是ts包:" << (int)(data[0]) << " " << len;
return;
}
ts_demuxer_input(_demuxer_ctx,(uint8_t*)data,len);
});
_demuxer_ctx = ts_demuxer_create([](void* param, int program, int stream, int codecid, int flags, int64_t pts, int64_t dts, const void* data, size_t bytes){
......@@ -66,8 +68,8 @@ TSDecoder::~TSDecoder() {
}
int TSDecoder::input(const uint8_t *data, int bytes) {
if(bytes == TS_PACKET_SIZE && ((uint8_t*)data)[0] == 0x47){
return ts_demuxer_input(_demuxer_ctx,(uint8_t*)data,bytes);
if (TSSegment::isTSPacket((char *)data, bytes)) {
return ts_demuxer_input(_demuxer_ctx, (uint8_t *) data, bytes);
}
_ts_segment.input((char*)data,bytes);
return bytes;
......@@ -76,6 +78,6 @@ int TSDecoder::input(const uint8_t *data, int bytes) {
void TSDecoder::setOnDecode(const Decoder::onDecode &decode) {
_on_decode = decode;
}
#endif//defined(ENABLE_HLS)
}//namespace mediakit
#endif//defined(ENABLE_RTPPROXY)
\ No newline at end of file
......@@ -11,7 +11,6 @@
#ifndef ZLMEDIAKIT_TSDECODER_H
#define ZLMEDIAKIT_TSDECODER_H
#if defined(ENABLE_RTPPROXY)
#include "Util/logger.h"
#include "Http/HttpRequestSplitter.h"
#include "Decoder.h"
......@@ -19,13 +18,17 @@
using namespace toolkit;
namespace mediakit {
//ts包拆分器
#define TS_PACKET_SIZE 188
#define TS_SYNC_BYTE 0x47
//TS包分割器,用于split一个一个的ts包
class TSSegment : public HttpRequestSplitter {
public:
typedef std::function<void(const char *data,uint64_t len)> onSegment;
TSSegment(int size = 188) : _size(size){}
TSSegment(int size = TS_PACKET_SIZE) : _size(size){}
~TSSegment(){}
void setOnSegment(const onSegment &cb);
static bool isTSPacket(const char *data, int len);
protected:
int64_t onRecvHeader(const char *data, uint64_t len) override ;
const char *onSearchPacketTail(const char *data, int len) override ;
......@@ -34,6 +37,7 @@ private:
onSegment _onSegment;
};
#if defined(ENABLE_HLS)
//ts解析器
class TSDecoder : public Decoder {
public:
......@@ -46,7 +50,7 @@ private:
struct ts_demuxer_t* _demuxer_ctx = nullptr;
onDecode _on_decode;
};
#endif//defined(ENABLE_HLS)
}//namespace mediakit
#endif//defined(ENABLE_RTPPROXY)
#endif //ZLMEDIAKIT_TSDECODER_H
......@@ -17,7 +17,9 @@ UdpRecver::UdpRecver() {
}
UdpRecver::~UdpRecver() {
if(_sock){
_sock->setOnRead(nullptr);
}
}
bool UdpRecver::initSock(uint16_t local_port,const char *local_ip) {
......
......@@ -28,18 +28,18 @@ RtpPacket::Ptr RtpInfo::makeRtp(TrackType type, const void* data, unsigned int l
pucRtp[2] = ui16RtpLen >> 8;
pucRtp[3] = ui16RtpLen & 0x00FF;
pucRtp[4] = 0x80;
pucRtp[5] = (mark << 7) | _ui8PlayloadType;
pucRtp[5] = (mark << 7) | _ui8PayloadType;
memcpy(&pucRtp[6], &sq, 2);
memcpy(&pucRtp[8], &ts, 4);
//ssrc
memcpy(&pucRtp[12], &sc, 4);
if(data){
//playload
//payload
memcpy(&pucRtp[16], data, len);
}
rtppkt->PT = _ui8PlayloadType;
rtppkt->PT = _ui8PayloadType;
rtppkt->interleaved = _ui8Interleaved;
rtppkt->mark = mark;
rtppkt->sequence = _ui16Sequence;
......
......@@ -66,7 +66,7 @@ public:
RtpInfo(uint32_t ui32Ssrc,
uint32_t ui32MtuSize,
uint32_t ui32SampleRate,
uint8_t ui8PlayloadType,
uint8_t ui8PayloadType,
uint8_t ui8Interleaved) {
if(ui32Ssrc == 0){
ui32Ssrc = ((uint64_t)this) & 0xFFFFFFFF;
......@@ -74,7 +74,7 @@ public:
_ui32Ssrc = ui32Ssrc;
_ui32SampleRate = ui32SampleRate;
_ui32MtuSize = ui32MtuSize;
_ui8PlayloadType = ui8PlayloadType;
_ui8PayloadType = ui8PayloadType;
_ui8Interleaved = ui8Interleaved;
}
......@@ -84,8 +84,8 @@ public:
return _ui8Interleaved;
}
int getPlayloadType() const {
return _ui8PlayloadType;
int getPayloadType() const {
return _ui8PayloadType;
}
int getSampleRate() const {
......@@ -110,7 +110,7 @@ protected:
uint32_t _ui32Ssrc;
uint32_t _ui32SampleRate;
uint32_t _ui32MtuSize;
uint8_t _ui8PlayloadType;
uint8_t _ui8PayloadType;
uint8_t _ui8Interleaved;
uint16_t _ui16Sequence = 0;
uint32_t _ui32TimeStamp = 0;
......
......@@ -81,6 +81,7 @@ RtpMultiCaster::~RtpMultiCaster() {
_pReader->setDetachCB(nullptr);
DebugL;
}
RtpMultiCaster::RtpMultiCaster(const EventPoller::Ptr &poller,const string &strLocalIp,const string &strVhost,const string &strApp,const string &strStream) {
auto src = dynamic_pointer_cast<RtspMediaSource>(MediaSource::find(RTSP_SCHEMA,strVhost,strApp, strStream));
if(!src){
......
......@@ -34,11 +34,11 @@ bool RtpReceiver::handleOneRtp(int track_index,SdpTrack::Ptr &track, unsigned ch
}
uint8_t padding = 0;
if (rtp_raw_ptr[0] & 0x40) {
if (rtp_raw_ptr[0] & 0x20) {
//获取padding大小
padding = rtp_raw_ptr[rtp_raw_len - 1];
//移除padding flag
rtp_raw_ptr[0] &= ~0x40;
rtp_raw_ptr[0] &= ~0x20;
//移除padding字节
rtp_raw_len -= padding;
}
......
......@@ -365,5 +365,43 @@ bool RtspUrl::setup(bool isSSL, const string &strUrl, const string &strUser, con
return true;
}
}//namespace mediakit
std::pair<Socket::Ptr, Socket::Ptr> makeSockPair_l(const EventPoller::Ptr &poller, const string &local_ip){
auto pSockRtp = std::make_shared<Socket>(poller);
if (!pSockRtp->bindUdpSock(0, local_ip.data())) {
//分配端口失败
throw runtime_error("open udp socket failed");
}
//是否是偶数
bool even_numbers = pSockRtp->get_local_port() % 2 == 0;
auto pSockRtcp = std::make_shared<Socket>(poller);
if (!pSockRtcp->bindUdpSock(pSockRtp->get_local_port() + (even_numbers ? 1 : -1), local_ip.data())) {
//分配端口失败
throw runtime_error("open udp socket failed");
}
if (!even_numbers) {
//如果rtp端口不是偶数,那么与rtcp端口互换,目的是兼容一些要求严格的播放器或服务器
Socket::Ptr tmp = pSockRtp;
pSockRtp = pSockRtcp;
pSockRtcp = tmp;
}
return std::make_pair(pSockRtp, pSockRtcp);
}
std::pair<Socket::Ptr, Socket::Ptr> makeSockPair(const EventPoller::Ptr &poller, const string &local_ip){
int try_count = 0;
while (true) {
try {
return makeSockPair_l(poller, local_ip);
} catch (...) {
if (++try_count == 3) {
throw;
}
WarnL << "open udp socket failed, retry: " << try_count;
}
}
}
}//namespace mediakit
\ No newline at end of file
......@@ -188,11 +188,11 @@ public:
/**
* 构造sdp
* @param sample_rate 采样率
* @param playload_type pt类型
* @param payload_type pt类型
*/
Sdp(uint32_t sample_rate, uint8_t playload_type){
Sdp(uint32_t sample_rate, uint8_t payload_type){
_sample_rate = sample_rate;
_playload_type = playload_type;
_payload_type = payload_type;
}
virtual ~Sdp(){}
......@@ -207,8 +207,8 @@ public:
* 获取pt
* @return
*/
uint8_t getPlayloadType() const{
return _playload_type;
uint8_t getPayloadType() const{
return _payload_type;
}
/**
......@@ -219,7 +219,7 @@ public:
return _sample_rate;
}
private:
uint8_t _playload_type;
uint8_t _payload_type;
uint32_t _sample_rate;
};
......@@ -263,18 +263,7 @@ public:
string getSdp() const override {
return _printer;
}
/**
* 返回音频或视频类型
* @return
*/
TrackType getTrackType() const override {
return TrackTitle;
}
/**
* 返回编码器id
* @return
*/
CodecId getCodecId() const override{
return CodecInvalid;
}
......@@ -282,6 +271,7 @@ private:
_StrPrinter _printer;
};
} //namespace mediakit
std::pair<Socket::Ptr, Socket::Ptr> makeSockPair(const EventPoller::Ptr &poller, const string &local_ip);
} //namespace mediakit
#endif //RTSP_RTSP_H_
......@@ -30,16 +30,13 @@ using namespace toolkit;
#define RTP_GOP_SIZE 512
namespace mediakit {
typedef VideoPacketCache<RtpPacket> RtpVideoCache;
typedef AudioPacketCache<RtpPacket> RtpAudioCache;
/**
/**
* rtsp媒体源的数据抽象
* rtsp有关键的两要素,分别是sdp、rtp包
* 只要生成了这两要素,那么要实现rtsp推流、rtsp服务器就很简单了
* rtsp推拉流协议中,先传递sdp,然后再协商传输方式(tcp/udp/组播),最后一直传递rtp
*/
class RtspMediaSource : public MediaSource, public RingDelegate<RtpPacket::Ptr>, public RtpVideoCache, public RtpAudioCache {
class RtspMediaSource : public MediaSource, public RingDelegate<RtpPacket::Ptr>, public PacketCache<RtpPacket> {
public:
typedef ResourcePool<RtpPacket> PoolType;
typedef std::shared_ptr<RtspMediaSource> Ptr;
......@@ -175,32 +172,19 @@ public:
regist();
}
}
if(rtp->type == TrackVideo){
RtpVideoCache::inputVideo(rtp, keyPos);
}else{
RtpAudioCache::inputAudio(rtp);
}
PacketCache<RtpPacket>::inputPacket(rtp->type == TrackVideo, rtp, keyPos);
}
private:
/**
* 批量flush时间戳相同的视频rtp包时触发该函数
* @param rtp_list 时间戳相同的rtp包列表
* @param key_pos 是否包含关键帧
*/
void onFlushVideo(std::shared_ptr<List<RtpPacket::Ptr> > &rtp_list, bool key_pos) override {
_ring->write(rtp_list, key_pos);
}
/**
* 批量flush一定数量的音频rtp包时触发该函数
* 批量flush rtp包时触发该函数
* @param rtp_list rtp包列表
* @param key_pos 是否包含关键帧
*/
void onFlushAudio(std::shared_ptr<List<RtpPacket::Ptr> > &rtp_list) override{
//只有音频的话,就不存在gop缓存的意义
_ring->write(rtp_list, !_have_video);
void onFlush(std::shared_ptr<List<RtpPacket::Ptr> > &rtp_list, bool key_pos) override {
//如果不存在视频,那么就没有存在GOP缓存的意义,所以is_key一直为true确保一直清空GOP缓存
_ring->write(rtp_list, _have_video ? key_pos : true);
}
/**
......
......@@ -10,14 +10,11 @@
#include <set>
#include <cmath>
#include <stdarg.h>
#include <algorithm>
#include <iomanip>
#include "Common/config.h"
#include "RtspPlayer.h"
#include "Util/MD5.h"
#include "Util/mini.h"
#include "Util/util.h"
#include "Util/base64.h"
#include "Network/sockutil.h"
......@@ -40,28 +37,28 @@ RtspPlayer::~RtspPlayer(void) {
}
void RtspPlayer::teardown(){
if (alive()) {
sendRtspRequest("TEARDOWN" ,_strContentBase);
sendRtspRequest("TEARDOWN" ,_content_base);
shutdown(SockException(Err_shutdown,"teardown"));
}
_rtspMd5Nonce.clear();
_rtspRealm.clear();
_aTrackInfo.clear();
_strSession.clear();
_strContentBase.clear();
_md5_nonce.clear();
_realm.clear();
_sdp_track.clear();
_session_id.clear();
_content_base.clear();
RtpReceiver::clear();
CLEAR_ARR(_apRtpSock);
CLEAR_ARR(_apRtcpSock);
CLEAR_ARR(_aui16FirstSeq)
CLEAR_ARR(_aui64RtpRecv)
CLEAR_ARR(_aui64RtpRecv)
CLEAR_ARR(_aui16NowSeq)
_pPlayTimer.reset();
_pRtpTimer.reset();
_uiCseq = 1;
_onHandshake = nullptr;
CLEAR_ARR(_rtp_sock);
CLEAR_ARR(_rtcp_sock);
CLEAR_ARR(_rtp_seq_start)
CLEAR_ARR(_rtp_recv_count)
CLEAR_ARR(_rtp_recv_count)
CLEAR_ARR(_rtp_seq_now)
_play_check_timer.reset();
_rtp_check_timer.reset();
_cseq_send = 1;
_on_response = nullptr;
}
void RtspPlayer::play(const string &strUrl){
......@@ -81,20 +78,20 @@ void RtspPlayer::play(const string &strUrl){
(*this)[kRtspPwdIsMD5] = false;
}
_strUrl = url._url;
_eType = (Rtsp::eRtpType)(int)(*this)[kRtpType];
DebugL << url._url << " " << (url._user.size() ? url._user : "null") << " " << (url._passwd.size() ? url._passwd : "null") << " " << _eType;
_play_url = url._url;
_rtp_type = (Rtsp::eRtpType)(int)(*this)[kRtpType];
DebugL << url._url << " " << (url._user.size() ? url._user : "null") << " " << (url._passwd.size() ? url._passwd : "null") << " " << _rtp_type;
weak_ptr<RtspPlayer> weakSelf = dynamic_pointer_cast<RtspPlayer>(shared_from_this());
float playTimeOutSec = (*this)[kTimeoutMS].as<int>() / 1000.0;
_pPlayTimer.reset( new Timer(playTimeOutSec, [weakSelf]() {
_play_check_timer.reset(new Timer(playTimeOutSec, [weakSelf]() {
auto strongSelf=weakSelf.lock();
if(!strongSelf) {
return false;
}
strongSelf->onPlayResult_l(SockException(Err_timeout,"play rtsp timeout"),false);
return false;
},getPoller()));
}, getPoller()));
if(!(*this)[kNetAdapter].empty()){
setNetAdapter((*this)[kNetAdapter]);
......@@ -107,14 +104,13 @@ void RtspPlayer::onConnect(const SockException &err){
onPlayResult_l(err,false);
return;
}
sendDescribe();
sendOptions();
}
void RtspPlayer::onRecv(const Buffer::Ptr& pBuf) {
if(_benchmark_mode && !_pPlayTimer){
if(_benchmark_mode && !_play_check_timer){
//在性能测试模式下,如果rtsp握手完毕后,不再解析rtp包
_rtpTicker.resetTime();
_rtp_recv_ticker.resetTime();
return;
}
input(pBuf->data(),pBuf->size());
......@@ -122,12 +118,12 @@ void RtspPlayer::onRecv(const Buffer::Ptr& pBuf) {
void RtspPlayer::onErr(const SockException &ex) {
//定时器_pPlayTimer为空后表明握手结束了
onPlayResult_l(ex,!_pPlayTimer);
onPlayResult_l(ex,!_play_check_timer);
}
// from live555
bool RtspPlayer::handleAuthenticationFailure(const string &paramsStr) {
if(!_rtspRealm.empty()){
if(!_realm.empty()){
//已经认证过了
return false;
}
......@@ -142,28 +138,28 @@ bool RtspPlayer::handleAuthenticationFailure(const string &paramsStr) {
});
if (sscanf(paramsStr.data(), "Digest realm=\"%[^\"]\", nonce=\"%[^\"]\", stale=%[a-zA-Z]", realm, nonce, stale) == 3) {
_rtspRealm = (const char *)realm;
_rtspMd5Nonce = (const char *)nonce;
_realm = (const char *)realm;
_md5_nonce = (const char *)nonce;
return true;
}
if (sscanf(paramsStr.data(), "Digest realm=\"%[^\"]\", nonce=\"%[^\"]\"", realm, nonce) == 2) {
_rtspRealm = (const char *)realm;
_rtspMd5Nonce = (const char *)nonce;
_realm = (const char *)realm;
_md5_nonce = (const char *)nonce;
return true;
}
if (sscanf(paramsStr.data(), "Basic realm=\"%[^\"]\"", realm) == 1) {
_rtspRealm = (const char *)realm;
_realm = (const char *)realm;
return true;
}
return false;
}
void RtspPlayer::handleResDESCRIBE(const Parser& parser) {
bool RtspPlayer::handleResponse(const string &cmd, const Parser &parser){
string authInfo = parser["WWW-Authenticate"];
//发送DESCRIBE命令后的回复
if ((parser.Url() == "401") && handleAuthenticationFailure(authInfo)) {
sendDescribe();
return;
sendOptions();
return false;
}
if(parser.Url() == "302" || parser.Url() == "301"){
auto newUrl = parser["Location"];
......@@ -171,36 +167,36 @@ void RtspPlayer::handleResDESCRIBE(const Parser& parser) {
throw std::runtime_error("未找到Location字段(跳转url)");
}
play(newUrl);
return;
return false;
}
if (parser.Url() != "200") {
throw std::runtime_error(
StrPrinter << "DESCRIBE:" << parser.Url() << " " << parser.Tail() << endl);
throw std::runtime_error(StrPrinter << cmd << ":" << parser.Url() << " " << parser.Tail() << endl);
}
_strContentBase = parser["Content-Base"];
return true;
}
if(_strContentBase.empty()){
_strContentBase = _strUrl;
void RtspPlayer::handleResDESCRIBE(const Parser& parser) {
if (!handleResponse("DESCRIBE", parser)) {
return;
}
if (_strContentBase.back() == '/') {
_strContentBase.pop_back();
_content_base = parser["Content-Base"];
if(_content_base.empty()){
_content_base = _play_url;
}
if (_content_base.back() == '/') {
_content_base.pop_back();
}
SdpParser sdpParser(parser.Content());
//解析sdp
_aTrackInfo = sdpParser.getAvailableTrack();
_sdp_track = sdpParser.getAvailableTrack();
auto title = sdpParser.getTrack(TrackTitle);
_is_play_back = false;
bool is_play_back = false;
if(title && title->_duration ){
_is_play_back = true;
}
for(auto &stamp : _stamp){
stamp.setPlayBack(_is_play_back);
stamp.setRelativeStamp(0);
is_play_back = true;
}
if (_aTrackInfo.empty()) {
if (_sdp_track.empty()) {
throw std::runtime_error("无有效的Sdp Track");
}
if (!onCheckSDP(sdpParser.toString())) {
......@@ -212,40 +208,21 @@ void RtspPlayer::handleResDESCRIBE(const Parser& parser) {
//有必要的情况下创建udp端口
void RtspPlayer::createUdpSockIfNecessary(int track_idx){
auto &rtpSockRef = _apRtpSock[track_idx];
auto &rtcpSockRef = _apRtcpSock[track_idx];
if(!rtpSockRef){
rtpSockRef.reset(new Socket(getPoller()));
//rtp随机端口
if (!rtpSockRef->bindUdpSock(0, get_local_ip().data())) {
rtpSockRef.reset();
throw std::runtime_error("open rtp sock failed");
}
}
if(!rtcpSockRef){
rtcpSockRef.reset(new Socket(getPoller()));
//rtcp端口为rtp端口+1,目的是为了兼容某些服务器,其实更推荐随机端口
if (!rtcpSockRef->bindUdpSock(rtpSockRef->get_local_port() + 1, get_local_ip().data())) {
rtcpSockRef.reset();
throw std::runtime_error("open rtcp sock failed");
}
}
if(rtpSockRef->get_local_port() % 2 != 0){
//如果rtp端口不是偶数,那么与rtcp端口互换,目的是兼容一些要求严格的服务器
Socket::Ptr tmp = rtpSockRef;
rtpSockRef = rtcpSockRef;
rtcpSockRef = tmp;
auto &rtpSockRef = _rtp_sock[track_idx];
auto &rtcpSockRef = _rtcp_sock[track_idx];
if (!rtpSockRef || !rtcpSockRef) {
auto pr = makeSockPair(getPoller(), get_local_ip());
rtpSockRef = pr.first;
rtcpSockRef = pr.second;
}
}
//发送SETUP命令
void RtspPlayer::sendSetup(unsigned int trackIndex) {
_onHandshake = std::bind(&RtspPlayer::handleResSETUP,this, placeholders::_1,trackIndex);
auto &track = _aTrackInfo[trackIndex];
auto baseUrl = _strContentBase + "/" + track->_control_surffix;
switch (_eType) {
_on_response = std::bind(&RtspPlayer::handleResSETUP, this, placeholders::_1, trackIndex);
auto &track = _sdp_track[trackIndex];
auto baseUrl = _content_base + "/" + track->_control_surffix;
switch (_rtp_type) {
case Rtsp::RTP_TCP: {
sendRtspRequest("SETUP",baseUrl,{"Transport",StrPrinter << "RTP/AVP/TCP;unicast;interleaved=" << track->_type * 2 << "-" << track->_type * 2 + 1});
}
......@@ -256,10 +233,10 @@ void RtspPlayer::sendSetup(unsigned int trackIndex) {
break;
case Rtsp::RTP_UDP: {
createUdpSockIfNecessary(trackIndex);
sendRtspRequest("SETUP",baseUrl,{"Transport",
sendRtspRequest("SETUP", baseUrl, {"Transport",
StrPrinter << "RTP/AVP;unicast;client_port="
<< _apRtpSock[trackIndex]->get_local_port() << "-"
<< _apRtcpSock[trackIndex]->get_local_port()});
<< _rtp_sock[trackIndex]->get_local_port() << "-"
<< _rtcp_sock[trackIndex]->get_local_port()});
}
break;
default:
......@@ -273,34 +250,34 @@ void RtspPlayer::handleResSETUP(const Parser &parser, unsigned int uiTrackIndex)
StrPrinter << "SETUP:" << parser.Url() << " " << parser.Tail() << endl);
}
if (uiTrackIndex == 0) {
_strSession = parser["Session"];
_strSession.append(";");
_strSession = FindField(_strSession.data(), nullptr, ";");
_session_id = parser["Session"];
_session_id.append(";");
_session_id = FindField(_session_id.data(), nullptr, ";");
}
auto strTransport = parser["Transport"];
if(strTransport.find("TCP") != string::npos || strTransport.find("interleaved") != string::npos){
_eType = Rtsp::RTP_TCP;
_rtp_type = Rtsp::RTP_TCP;
}else if(strTransport.find("multicast") != string::npos){
_eType = Rtsp::RTP_MULTICAST;
_rtp_type = Rtsp::RTP_MULTICAST;
}else{
_eType = Rtsp::RTP_UDP;
_rtp_type = Rtsp::RTP_UDP;
}
RtspSplitter::enableRecvRtp(_eType == Rtsp::RTP_TCP);
RtspSplitter::enableRecvRtp(_rtp_type == Rtsp::RTP_TCP);
if(_eType == Rtsp::RTP_TCP) {
if(_rtp_type == Rtsp::RTP_TCP) {
string interleaved = FindField( FindField((strTransport + ";").data(), "interleaved=", ";").data(), NULL, "-");
_aTrackInfo[uiTrackIndex]->_interleaved = atoi(interleaved.data());
_sdp_track[uiTrackIndex]->_interleaved = atoi(interleaved.data());
}else{
const char *strPos = (_eType == Rtsp::RTP_MULTICAST ? "port=" : "server_port=") ;
const char *strPos = (_rtp_type == Rtsp::RTP_MULTICAST ? "port=" : "server_port=") ;
auto port_str = FindField((strTransport + ";").data(), strPos, ";");
uint16_t rtp_port = atoi(FindField(port_str.data(), NULL, "-").data());
uint16_t rtcp_port = atoi(FindField(port_str.data(), "-",NULL).data());
auto &pRtpSockRef = _apRtpSock[uiTrackIndex];
auto &pRtcpSockRef = _apRtcpSock[uiTrackIndex];
auto &pRtpSockRef = _rtp_sock[uiTrackIndex];
auto &pRtcpSockRef = _rtcp_sock[uiTrackIndex];
if (_eType == Rtsp::RTP_MULTICAST) {
if (_rtp_type == Rtsp::RTP_MULTICAST) {
//udp组播
auto multiAddr = FindField((strTransport + ";").data(), "destination=", ";");
pRtpSockRef.reset(new Socket(getPoller()));
......@@ -342,7 +319,7 @@ void RtspPlayer::handleResSETUP(const Parser &parser, unsigned int uiTrackIndex)
WarnL << "收到其他地址的rtp数据:" << SockUtil::inet_ntoa(((struct sockaddr_in *) addr)->sin_addr);
return;
}
strongSelf->handleOneRtp(uiTrackIndex, strongSelf->_aTrackInfo[uiTrackIndex], (unsigned char *) buf->data(), buf->size());
strongSelf->handleOneRtp(uiTrackIndex, strongSelf->_sdp_track[uiTrackIndex], (unsigned char *) buf->data(), buf->size());
});
if(pRtcpSockRef) {
......@@ -356,12 +333,12 @@ void RtspPlayer::handleResSETUP(const Parser &parser, unsigned int uiTrackIndex)
WarnL << "收到其他地址的rtcp数据:" << SockUtil::inet_ntoa(((struct sockaddr_in *) addr)->sin_addr);
return;
}
strongSelf->onRtcpPacket(uiTrackIndex, strongSelf->_aTrackInfo[uiTrackIndex], (unsigned char *) buf->data(), buf->size());
strongSelf->onRtcpPacket(uiTrackIndex, strongSelf->_sdp_track[uiTrackIndex], (unsigned char *) buf->data(), buf->size());
});
}
}
if (uiTrackIndex < _aTrackInfo.size() - 1) {
if (uiTrackIndex < _sdp_track.size() - 1) {
//需要继续发送SETUP命令
sendSetup(uiTrackIndex + 1);
return;
......@@ -373,26 +350,55 @@ void RtspPlayer::handleResSETUP(const Parser &parser, unsigned int uiTrackIndex)
void RtspPlayer::sendDescribe() {
//发送DESCRIBE命令后处理函数:handleResDESCRIBE
_onHandshake = std::bind(&RtspPlayer::handleResDESCRIBE,this, placeholders::_1);
sendRtspRequest("DESCRIBE",_strUrl,{"Accept","application/sdp"});
_on_response = std::bind(&RtspPlayer::handleResDESCRIBE, this, placeholders::_1);
sendRtspRequest("DESCRIBE", _play_url, {"Accept", "application/sdp"});
}
void RtspPlayer::sendOptions(){
_on_response = [this](const Parser& parser){
if (!handleResponse("OPTIONS", parser)) {
return;
}
//获取服务器支持的命令
_supported_cmd.clear();
auto public_val = split(parser["Public"],",");
for(auto &cmd : public_val){
trim(cmd);
_supported_cmd.emplace(cmd);
}
//发送Describe请求,获取sdp
sendDescribe();
};
sendRtspRequest("OPTIONS", _play_url);
}
void RtspPlayer::sendKeepAlive(){
_on_response = [this](const Parser& parser){};
if(_supported_cmd.find("GET_PARAMETER") != _supported_cmd.end()){
//支持GET_PARAMETER,用此命令保活
sendRtspRequest("GET_PARAMETER", _play_url);
}else{
//不支持GET_PARAMETER,用OPTIONS命令保活
sendRtspRequest("OPTIONS", _play_url);
}
}
void RtspPlayer::sendPause(int type , uint32_t seekMS){
_onHandshake = std::bind(&RtspPlayer::handleResPAUSE,this, placeholders::_1,type);
_on_response = std::bind(&RtspPlayer::handleResPAUSE, this, placeholders::_1, type);
//开启或暂停rtsp
switch (type){
case type_pause:
sendRtspRequest("PAUSE", _strContentBase);
sendRtspRequest("PAUSE", _content_base);
break;
case type_play:
sendRtspRequest("PLAY", _strContentBase);
sendRtspRequest("PLAY", _content_base);
break;
case type_seek:
sendRtspRequest("PLAY", _strContentBase, {"Range",StrPrinter << "npt=" << setiosflags(ios::fixed) << setprecision(2) << seekMS / 1000.0 << "-"});
sendRtspRequest("PLAY", _content_base, {"Range",StrPrinter << "npt=" << setiosflags(ios::fixed) << setprecision(2) << seekMS / 1000.0 << "-"});
break;
default:
WarnL << "unknown type : " << type;
_onHandshake = nullptr;
_on_response = nullptr;
break;
}
}
......@@ -419,7 +425,7 @@ void RtspPlayer::handleResPAUSE(const Parser& parser,int type) {
if (type == type_pause) {
//暂停成功!
_pRtpTimer.reset();
_rtp_check_timer.reset();
return;
}
......@@ -436,22 +442,20 @@ void RtspPlayer::handleResPAUSE(const Parser& parser,int type) {
DebugL << "seekTo(ms):" << iSeekTo;
}
//设置相对时间戳
_stamp[0].setRelativeStamp(iSeekTo);
_stamp[1].setRelativeStamp(iSeekTo);
onPlayResult_l(SockException(Err_success, type == type_seek ? "resum rtsp success" : "rtsp play success"), type == type_seek);
}
void RtspPlayer::onWholeRtspPacket(Parser &parser) {
try {
decltype(_onHandshake) fun;
_onHandshake.swap(fun);
if(fun){
fun(parser);
decltype(_on_response) func;
_on_response.swap(func);
if(func){
func(parser);
}
parser.Clear();
} catch (std::exception &err) {
//定时器_pPlayTimer为空后表明握手结束了
onPlayResult_l(SockException(Err_other, err.what()),!_pPlayTimer);
onPlayResult_l(SockException(Err_other, err.what()),!_play_check_timer);
}
}
......@@ -461,12 +465,12 @@ void RtspPlayer::onRtpPacket(const char *data, uint64_t len) {
if(interleaved %2 == 0){
trackIdx = getTrackIndexByInterleaved(interleaved);
if (trackIdx != -1) {
handleOneRtp(trackIdx,_aTrackInfo[trackIdx],(unsigned char *)data + 4, len - 4);
handleOneRtp(trackIdx, _sdp_track[trackIdx], (unsigned char *)data + 4, len - 4);
}
}else{
trackIdx = getTrackIndexByInterleaved(interleaved - 1);
if (trackIdx != -1) {
onRtcpPacket(trackIdx, _aTrackInfo[trackIdx], (unsigned char *) data + 4, len - 4);
onRtcpPacket(trackIdx, _sdp_track[trackIdx], (unsigned char *) data + 4, len - 4);
}
}
}
......@@ -536,8 +540,8 @@ void RtspPlayer::sendReceiverReport(bool overTcp,int iTrackIndex){
static const char s_cname[] = "ZLMediaKitRtsp";
uint8_t aui8Rtcp[4 + 32 + 10 + sizeof(s_cname) + 1] = {0};
uint8_t *pui8Rtcp_RR = aui8Rtcp + 4, *pui8Rtcp_SDES = pui8Rtcp_RR + 32;
auto &track = _aTrackInfo[iTrackIndex];
auto &counter = _aRtcpCnt[iTrackIndex];
auto &track = _sdp_track[iTrackIndex];
auto &counter = _rtcp_counter[iTrackIndex];
aui8Rtcp[0] = '$';
aui8Rtcp[1] = track->_interleaved + 1;
......@@ -593,25 +597,22 @@ void RtspPlayer::sendReceiverReport(bool overTcp,int iTrackIndex){
if(overTcp){
send(obtainBuffer((char *) aui8Rtcp, sizeof(aui8Rtcp)));
}else if(_apRtcpSock[iTrackIndex]) {
_apRtcpSock[iTrackIndex]->send((char *) aui8Rtcp + 4, sizeof(aui8Rtcp) - 4);
}else if(_rtcp_sock[iTrackIndex]) {
_rtcp_sock[iTrackIndex]->send((char *) aui8Rtcp + 4, sizeof(aui8Rtcp) - 4);
}
}
void RtspPlayer::onRtpSorted(const RtpPacket::Ptr &rtppt, int trackidx){
//统计丢包率
if (_aui16FirstSeq[trackidx] == 0 || rtppt->sequence < _aui16FirstSeq[trackidx]) {
_aui16FirstSeq[trackidx] = rtppt->sequence;
_aui64RtpRecv[trackidx] = 0;
if (_rtp_seq_start[trackidx] == 0 || rtppt->sequence < _rtp_seq_start[trackidx]) {
_rtp_seq_start[trackidx] = rtppt->sequence;
_rtp_recv_count[trackidx] = 0;
}
_aui64RtpRecv[trackidx] ++;
_aui16NowSeq[trackidx] = rtppt->sequence;
_rtp_recv_count[trackidx] ++;
_rtp_seq_now[trackidx] = rtppt->sequence;
_stamp[trackidx] = rtppt->timeStamp;
//计算相对时间戳
int64_t dts_out;
_stamp[trackidx].revise(rtppt->timeStamp,rtppt->timeStamp,dts_out,dts_out);
rtppt->timeStamp = dts_out;
onRecvRTP_l(rtppt,_aTrackInfo[trackidx]);
onRecvRTP_l(rtppt, _sdp_track[trackidx]);
}
float RtspPlayer::getPacketLossRate(TrackType type) const{
......@@ -619,9 +620,9 @@ float RtspPlayer::getPacketLossRate(TrackType type) const{
if(iTrackIdx == -1){
uint64_t totalRecv = 0;
uint64_t totalSend = 0;
for (unsigned int i = 0; i < _aTrackInfo.size(); i++) {
totalRecv += _aui64RtpRecv[i];
totalSend += (_aui16NowSeq[i] - _aui16FirstSeq[i] + 1);
for (unsigned int i = 0; i < _sdp_track.size(); i++) {
totalRecv += _rtp_recv_count[i];
totalSend += (_rtp_seq_now[i] - _rtp_seq_start[i] + 1);
}
if(totalSend == 0){
return 0;
......@@ -629,14 +630,14 @@ float RtspPlayer::getPacketLossRate(TrackType type) const{
return 1.0 - (double)totalRecv / totalSend;
}
if(_aui16NowSeq[iTrackIdx] - _aui16FirstSeq[iTrackIdx] + 1 == 0){
if(_rtp_seq_now[iTrackIdx] - _rtp_seq_start[iTrackIdx] + 1 == 0){
return 0;
}
return 1.0 - (double)_aui64RtpRecv[iTrackIdx] / (_aui16NowSeq[iTrackIdx] - _aui16FirstSeq[iTrackIdx] + 1);
return 1.0 - (double)_rtp_recv_count[iTrackIdx] / (_rtp_seq_now[iTrackIdx] - _rtp_seq_start[iTrackIdx] + 1);
}
uint32_t RtspPlayer::getProgressMilliSecond() const{
return MAX(_stamp[0].getRelativeStamp(),_stamp[1].getRelativeStamp());
return MAX(_stamp[0],_stamp[1]);
}
void RtspPlayer::seekToMilliSecond(uint32_t ms) {
......@@ -659,15 +660,15 @@ void RtspPlayer::sendRtspRequest(const string &cmd, const string &url, const std
void RtspPlayer::sendRtspRequest(const string &cmd, const string &url,const StrCaseMap &header_const) {
auto header = header_const;
header.emplace("CSeq",StrPrinter << _uiCseq++);
header.emplace("CSeq",StrPrinter << _cseq_send++);
header.emplace("User-Agent",SERVER_NAME);
if(!_strSession.empty()){
header.emplace("Session",_strSession);
if(!_session_id.empty()){
header.emplace("Session", _session_id);
}
if(!_rtspRealm.empty() && !(*this)[kRtspUser].empty()){
if(!_rtspMd5Nonce.empty()){
if(!_realm.empty() && !(*this)[kRtspUser].empty()){
if(!_md5_nonce.empty()){
//MD5认证
/*
response计算方法如下:
......@@ -679,14 +680,14 @@ void RtspPlayer::sendRtspRequest(const string &cmd, const string &url,const StrC
*/
string encrypted_pwd = (*this)[kRtspPwd];
if(!(*this)[kRtspPwdIsMD5].as<bool>()){
encrypted_pwd = MD5((*this)[kRtspUser]+ ":" + _rtspRealm + ":" + encrypted_pwd).hexdigest();
encrypted_pwd = MD5((*this)[kRtspUser] + ":" + _realm + ":" + encrypted_pwd).hexdigest();
}
auto response = MD5( encrypted_pwd + ":" + _rtspMd5Nonce + ":" + MD5(cmd + ":" + url).hexdigest()).hexdigest();
auto response = MD5(encrypted_pwd + ":" + _md5_nonce + ":" + MD5(cmd + ":" + url).hexdigest()).hexdigest();
_StrPrinter printer;
printer << "Digest ";
printer << "username=\"" << (*this)[kRtspUser] << "\", ";
printer << "realm=\"" << _rtspRealm << "\", ";
printer << "nonce=\"" << _rtspMd5Nonce << "\", ";
printer << "realm=\"" << _realm << "\", ";
printer << "nonce=\"" << _md5_nonce << "\", ";
printer << "uri=\"" << url << "\", ";
printer << "response=\"" << response << "\"";
header.emplace("Authorization",printer);
......@@ -708,25 +709,31 @@ void RtspPlayer::sendRtspRequest(const string &cmd, const string &url,const StrC
}
void RtspPlayer::onRecvRTP_l(const RtpPacket::Ptr &pkt, const SdpTrack::Ptr &track) {
_rtpTicker.resetTime();
onRecvRTP(pkt,track);
_rtp_recv_ticker.resetTime();
onRecvRTP(pkt, track);
int iTrackIndex = getTrackIndexByInterleaved(pkt->interleaved);
if(iTrackIndex == -1){
if (iTrackIndex == -1) {
return;
}
RtcpCounter &counter = _aRtcpCnt[iTrackIndex];
RtcpCounter &counter = _rtcp_counter[iTrackIndex];
counter.pktCnt = pkt->sequence;
auto &ticker = _aRtcpTicker[iTrackIndex];
auto &ticker = _rtcp_send_ticker[iTrackIndex];
if (ticker.elapsedTime() > 5 * 1000) {
//send rtcp every 5 second
counter.lastTimeStamp = counter.timeStamp;
//直接保存网络字节序
memcpy(&counter.timeStamp, pkt->data() + 8 , 4);
if(counter.lastTimeStamp != 0){
sendReceiverReport(_eType == Rtsp::RTP_TCP,iTrackIndex);
memcpy(&counter.timeStamp, pkt->data() + 8, 4);
if (counter.lastTimeStamp != 0) {
sendReceiverReport(_rtp_type == Rtsp::RTP_TCP, iTrackIndex);
ticker.resetTime();
}
//有些rtsp服务器需要rtcp保活,有些需要发送信令保活
if (iTrackIndex == 0) {
//只需要发送一次心跳信令包
sendKeepAlive();
}
}
}
......@@ -735,27 +742,27 @@ void RtspPlayer::onPlayResult_l(const SockException &ex , bool handshakeComplete
if(!ex){
//播放成功,恢复rtp接收超时定时器
_rtpTicker.resetTime();
_rtp_recv_ticker.resetTime();
weak_ptr<RtspPlayer> weakSelf = dynamic_pointer_cast<RtspPlayer>(shared_from_this());
int timeoutMS = (*this)[kMediaTimeoutMS].as<int>();
//创建rtp数据接收超时检测定时器
_pRtpTimer.reset( new Timer(timeoutMS / 2000.0, [weakSelf,timeoutMS]() {
_rtp_check_timer.reset(new Timer(timeoutMS / 2000.0, [weakSelf,timeoutMS]() {
auto strongSelf=weakSelf.lock();
if(!strongSelf) {
return false;
}
if(strongSelf->_rtpTicker.elapsedTime()> timeoutMS) {
if(strongSelf->_rtp_recv_ticker.elapsedTime() > timeoutMS) {
//接收rtp媒体数据包超时
strongSelf->onPlayResult_l(SockException(Err_timeout,"receive rtp timeout"), true);
return false;
}
return true;
},getPoller()));
}, getPoller()));
}
if (!handshakeCompleted) {
//开始播放阶段
_pPlayTimer.reset();
_play_check_timer.reset();
onPlayResult(ex);
//是否为性能测试模式
_benchmark_mode = (*this)[Client::kBenchmarkMode].as<int>();
......@@ -772,25 +779,25 @@ void RtspPlayer::onPlayResult_l(const SockException &ex , bool handshakeComplete
}
}
int RtspPlayer::getTrackIndexByInterleaved(int interleaved) const{
for (unsigned int i = 0; i < _aTrackInfo.size(); i++) {
if (_aTrackInfo[i]->_interleaved == interleaved) {
int RtspPlayer::getTrackIndexByInterleaved(int interleaved) const {
for (unsigned int i = 0; i < _sdp_track.size(); i++) {
if (_sdp_track[i]->_interleaved == interleaved) {
return i;
}
}
if(_aTrackInfo.size() == 1){
if (_sdp_track.size() == 1) {
return 0;
}
return -1;
}
int RtspPlayer::getTrackIndexByTrackType(TrackType trackType) const {
for (unsigned int i = 0; i < _aTrackInfo.size(); i++) {
if (_aTrackInfo[i]->_type == trackType) {
for (unsigned int i = 0; i < _sdp_track.size(); i++) {
if (_sdp_track[i]->_type == trackType) {
return i;
}
}
if(_aTrackInfo.size() == 1){
if (_sdp_track.size() == 1) {
return 0;
}
return -1;
......
......@@ -94,53 +94,57 @@ private:
void handleResDESCRIBE(const Parser &parser);
bool handleAuthenticationFailure(const string &wwwAuthenticateParamsStr);
void handleResPAUSE(const Parser &parser, int type);
bool handleResponse(const string &cmd, const Parser &parser);
//发送SETUP命令
void sendOptions();
void sendSetup(unsigned int uiTrackIndex);
void sendPause(int type , uint32_t ms);
void sendDescribe();
void sendKeepAlive();
void sendRtspRequest(const string &cmd, const string &url ,const StrCaseMap &header = StrCaseMap());
void sendRtspRequest(const string &cmd, const string &url ,const std::initializer_list<string> &header);
void sendReceiverReport(bool overTcp,int iTrackIndex);
void createUdpSockIfNecessary(int track_idx);
private:
string _strUrl;
vector<SdpTrack::Ptr> _aTrackInfo;
function<void(const Parser&)> _onHandshake;
Socket::Ptr _apRtpSock[2]; //RTP端口,trackid idx 为数组下标
Socket::Ptr _apRtcpSock[2];//RTCP端口,trackid idx 为数组下标
string _play_url;
vector<SdpTrack::Ptr> _sdp_track;
function<void(const Parser&)> _on_response;
//RTP端口,trackid idx 为数组下标
Socket::Ptr _rtp_sock[2];
//RTCP端口,trackid idx 为数组下标
Socket::Ptr _rtcp_sock[2];
//rtsp鉴权相关
string _rtspMd5Nonce;
string _rtspRealm;
string _md5_nonce;
string _realm;
//rtsp info
string _strSession;
unsigned int _uiCseq = 1;
string _strContentBase;
Rtsp::eRtpType _eType = Rtsp::RTP_TCP;
string _session_id;
uint32_t _cseq_send = 1;
string _content_base;
Rtsp::eRtpType _rtp_type = Rtsp::RTP_TCP;
/* 丢包率统计需要用到的参数 */
uint16_t _aui16FirstSeq[2] = { 0 , 0};
uint16_t _aui16NowSeq[2] = { 0 , 0 };
uint64_t _aui64RtpRecv[2] = { 0 , 0};
uint16_t _rtp_seq_start[2] = {0, 0};
uint16_t _rtp_seq_now[2] = {0, 0};
uint64_t _rtp_recv_count[2] = {0, 0};
//当前rtp时间戳
uint32_t _stamp[2] = {0, 0};
//超时功能实现
Ticker _rtpTicker;
std::shared_ptr<Timer> _pPlayTimer;
std::shared_ptr<Timer> _pRtpTimer;
//时间戳
Stamp _stamp[2];
Ticker _rtp_recv_ticker;
std::shared_ptr<Timer> _play_check_timer;
std::shared_ptr<Timer> _rtp_check_timer;
//rtcp相关
RtcpCounter _aRtcpCnt[2]; //rtcp统计,trackid idx 为数组下标
Ticker _aRtcpTicker[2]; //rtcp发送时间,trackid idx 为数组下标
//rtcp统计,trackid idx 为数组下标
RtcpCounter _rtcp_counter[2];
//rtcp发送时间,trackid idx 为数组下标
Ticker _rtcp_send_ticker[2];
//是否为rtsp点播
bool _is_play_back;
//是否为性能测试模式
bool _benchmark_mode = false;
//服务器支持的命令
set<string> _supported_cmd;
};
} /* namespace mediakit */
......
......@@ -221,8 +221,7 @@ void RtspSession::handleReq_ANNOUNCE(const Parser &parser) {
auto src = dynamic_pointer_cast<RtmpMediaSource>(MediaSource::find(RTSP_SCHEMA,
_mediaInfo._vhost,
_mediaInfo._app,
_mediaInfo._streamid,
false));
_mediaInfo._streamid));
if(src){
sendRtspResponse("406 Not Acceptable", {"Content-Type", "text/plain"}, "Already publishing.");
string err = StrPrinter << "ANNOUNCE:"
......@@ -240,6 +239,12 @@ void RtspSession::handleReq_ANNOUNCE(const Parser &parser) {
_mediaInfo.parse(full_url);
}
if(_mediaInfo._app.empty() || _mediaInfo._streamid.empty()){
//推流rtsp url必须最少两级(rtsp://host/app/stream_id),不允许莫名其妙的推流url
sendRtspResponse("403 Forbidden", {"Content-Type", "text/plain"}, "rtsp推流url非法,最少确保两级rtsp url");
throw SockException(Err_shutdown,StrPrinter << "rtsp推流url非法:" << full_url);
}
SdpParser sdpParser(parser.Content());
_strSession = makeRandStr(12);
_aTrackInfo = sdpParser.getAvailableTrack();
......@@ -312,39 +317,81 @@ void RtspSession::handleReq_RECORD(const Parser &parser){
}
}
void RtspSession::handleReq_Describe(const Parser &parser) {
void RtspSession::emitOnPlay(){
weak_ptr<RtspSession> weakSelf = dynamic_pointer_cast<RtspSession>(shared_from_this());
//url鉴权回调
auto onRes = [weakSelf](const string &err) {
auto strongSelf = weakSelf.lock();
if (!strongSelf) {
return;
}
if (!err.empty()) {
//播放url鉴权失败
strongSelf->sendRtspResponse("401 Unauthorized", {"Content-Type", "text/plain"}, err);
strongSelf->shutdown(SockException(Err_shutdown, StrPrinter << "401 Unauthorized:" << err));
return;
}
strongSelf->onAuthSuccess();
};
Broadcast::AuthInvoker invoker = [weakSelf, onRes](const string &err) {
auto strongSelf = weakSelf.lock();
if (!strongSelf) {
return;
}
strongSelf->async([onRes, err, weakSelf]() {
onRes(err);
});
};
//广播通用播放url鉴权事件
auto flag = _emit_on_play ? false : NoticeCenter::Instance().emitEvent(Broadcast::kBroadcastMediaPlayed, _mediaInfo, invoker, static_cast<SockInfo &>(*this));
if (!flag) {
//该事件无人监听,默认不鉴权
onRes("");
}
//已经鉴权过了
_emit_on_play = true;
}
void RtspSession::handleReq_Describe(const Parser &parser) {
//该请求中的认证信息
auto authorization = parser["Authorization"];
onGetRealm invoker = [weakSelf,authorization](const string &realm){
weak_ptr<RtspSession> weakSelf = dynamic_pointer_cast<RtspSession>(shared_from_this());
//rtsp专属鉴权是否开启事件回调
onGetRealm invoker = [weakSelf, authorization](const string &realm) {
auto strongSelf = weakSelf.lock();
if(!strongSelf){
if (!strongSelf) {
//本对象已经销毁
return;
}
//切换到自己的线程然后执行
strongSelf->async([weakSelf,realm,authorization](){
strongSelf->async([weakSelf, realm, authorization]() {
auto strongSelf = weakSelf.lock();
if(!strongSelf){
if (!strongSelf) {
//本对象已经销毁
return;
}
if(realm.empty()){
//无需认证,回复sdp
strongSelf->onAuthSuccess();
if (realm.empty()) {
//无需rtsp专属认证, 那么继续url通用鉴权认证(on_play)
strongSelf->emitOnPlay();
return;
}
//该流需要认证
strongSelf->onAuthUser(realm,authorization);
//该流需要rtsp专属认证,开启rtsp专属认证后,将不再触发url通用鉴权认证(on_play)
strongSelf->_rtsp_realm = realm;
strongSelf->onAuthUser(realm, authorization);
});
};
//广播是否需要认证事件
if(!NoticeCenter::Instance().emitEvent(Broadcast::kBroadcastOnGetRtspRealm,_mediaInfo,invoker,static_cast<SockInfo &>(*this))){
if(_rtsp_realm.empty()){
//广播是否需要rtsp专属认证事件
if (!NoticeCenter::Instance().emitEvent(Broadcast::kBroadcastOnGetRtspRealm, _mediaInfo, invoker, static_cast<SockInfo &>(*this))) {
//无人监听此事件,说明无需认证
invoker("");
}
}else{
invoker(_rtsp_realm);
}
}
void RtspSession::onAuthSuccess() {
TraceP(this);
......@@ -627,21 +674,18 @@ void RtspSession::handleReq_Setup(const Parser &parser) {
}
break;
case Rtsp::RTP_UDP: {
//我们用trackIdx区分rtp和rtcp包
auto pSockRtp = std::make_shared<Socket>(_sock->getPoller());
if (!pSockRtp->bindUdpSock(0,get_local_ip().data())) {
std::pair<Socket::Ptr, Socket::Ptr> pr;
try{
pr = makeSockPair(_sock->getPoller(), get_local_ip());
}catch(std::exception &ex) {
//分配端口失败
send_NotAcceptable();
throw SockException(Err_shutdown, "open rtp socket failed");
throw SockException(Err_shutdown, ex.what());
}
auto pSockRtcp = std::make_shared<Socket>(_sock->getPoller());
if (!pSockRtcp->bindUdpSock(pSockRtp->get_local_port() + 1,get_local_ip().data())) {
//分配端口失败
send_NotAcceptable();
throw SockException(Err_shutdown, "open rtcp socket failed");
}
_apRtpSock[trackIdx] = pSockRtp;
_apRtcpSock[trackIdx] = pSockRtcp;
_apRtpSock[trackIdx] = pr.first;
_apRtcpSock[trackIdx] = pr.second;
//设置客户端内网端口信息
string strClientPort = FindField(parser["Transport"].data(), "client_port=", NULL);
uint16_t ui16RtpPort = atoi( FindField(strClientPort.data(), NULL, "-").data());
......@@ -653,23 +697,23 @@ void RtspSession::handleReq_Setup(const Parser &parser) {
peerAddr.sin_port = htons(ui16RtpPort);
peerAddr.sin_addr.s_addr = inet_addr(get_peer_ip().data());
bzero(&(peerAddr.sin_zero), sizeof peerAddr.sin_zero);
pSockRtp->setSendPeerAddr((struct sockaddr *)(&peerAddr));
pr.first->setSendPeerAddr((struct sockaddr *)(&peerAddr));
//设置rtcp发送目标地址
peerAddr.sin_family = AF_INET;
peerAddr.sin_port = htons(ui16RtcpPort);
peerAddr.sin_addr.s_addr = inet_addr(get_peer_ip().data());
bzero(&(peerAddr.sin_zero), sizeof peerAddr.sin_zero);
pSockRtcp->setSendPeerAddr((struct sockaddr *)(&peerAddr));
pr.second->setSendPeerAddr((struct sockaddr *)(&peerAddr));
//尝试获取客户端nat映射地址
startListenPeerUdpData(trackIdx);
//InfoP(this) << "分配端口:" << srv_port;
sendRtspResponse("200 OK",
{"Transport",StrPrinter << "RTP/AVP/UDP;unicast;"
{"Transport", StrPrinter << "RTP/AVP/UDP;unicast;"
<< "client_port=" << strClientPort << ";"
<< "server_port=" << pSockRtp->get_local_port() << "-" << pSockRtcp->get_local_port() << ";"
<< "server_port=" << pr.first->get_local_port() << "-" << pr.second->get_local_port() << ";"
<< "ssrc=" << printSSRC(trackRef->_ssrc)
});
}
......@@ -720,18 +764,8 @@ void RtspSession::handleReq_Setup(const Parser &parser) {
void RtspSession::handleReq_Play(const Parser &parser) {
if (_aTrackInfo.empty() || parser["Session"] != _strSession) {
send_SessionNotFound();
throw SockException(Err_shutdown,_aTrackInfo.empty() ? "can not find any availabe track when play" : "session not found when play");
throw SockException(Err_shutdown,_aTrackInfo.empty() ? "can not find any available track when play" : "session not found when play");
}
auto strRange = parser["Range"];
auto onRes = [this,strRange](const string &err){
bool authSuccess = err.empty();
if(!authSuccess){
//第一次play是播放,否则是恢复播放。只对播放鉴权
sendRtspResponse("401 Unauthorized", {"Content-Type", "text/plain"}, err);
shutdown(SockException(Err_shutdown,StrPrinter << "401 Unauthorized:" << err));
return;
}
auto pMediaSrc = _pMediaSrc.lock();
if(!pMediaSrc){
send_StreamNotFound();
......@@ -742,7 +776,8 @@ void RtspSession::handleReq_Play(const Parser &parser) {
bool useBuf = true;
_enableSendRtp = false;
float iStartTime = 0;
if (strRange.size() && !_bFirstPlay) {
auto strRange = parser["Range"];
if (strRange.size()) {
//这个是seek操作
auto strStart = FindField(strRange.data(), "npt=", "-");
if (strStart == "now") {
......@@ -751,17 +786,16 @@ void RtspSession::handleReq_Play(const Parser &parser) {
iStartTime = 1000 * atof(strStart.data());
InfoP(this) << "rtsp seekTo(ms):" << iStartTime;
useBuf = !pMediaSrc->seekTo(iStartTime);
}else if(pMediaSrc->totalReaderCount() == 0){
} else if (pMediaSrc->totalReaderCount() == 0) {
//第一个消费者
pMediaSrc->seekTo(0);
}
_bFirstPlay = false;
_StrPrinter rtp_info;
for(auto &track : _aTrackInfo){
for (auto &track : _aTrackInfo) {
if (track->_inited == false) {
//还有track没有setup
shutdown(SockException(Err_shutdown,"track not setuped"));
shutdown(SockException(Err_shutdown, "track not setuped"));
return;
}
track->_ssrc = pMediaSrc->getSsrc(track->_type);
......@@ -770,11 +804,10 @@ void RtspSession::handleReq_Play(const Parser &parser) {
rtp_info << "url=" << _strContentBase << "/" << track->_control_surffix << ";"
<< "seq=" << track->_seq << ";"
<< "rtptime=" << (int)(track->_time_stamp * (track->_samplerate / 1000)) << ",";
<< "rtptime=" << (int) (track->_time_stamp * (track->_samplerate / 1000)) << ",";
}
rtp_info.pop_back();
sendRtspResponse("200 OK",
{"Range", StrPrinter << "npt=" << setiosflags(ios::fixed) << setprecision(2) << (useBuf? pMediaSrc->getTimeStamp(TrackInvalid) / 1000.0 : iStartTime / 1000),
"RTP-Info",rtp_info
......@@ -785,51 +818,24 @@ void RtspSession::handleReq_Play(const Parser &parser) {
if (!_pRtpReader && _rtpType != Rtsp::RTP_MULTICAST) {
weak_ptr<RtspSession> weakSelf = dynamic_pointer_cast<RtspSession>(shared_from_this());
_pRtpReader = pMediaSrc->getRing()->attach(getPoller(),useBuf);
_pRtpReader = pMediaSrc->getRing()->attach(getPoller(), useBuf);
_pRtpReader->setDetachCB([weakSelf]() {
auto strongSelf = weakSelf.lock();
if(!strongSelf) {
if (!strongSelf) {
return;
}
strongSelf->shutdown(SockException(Err_shutdown,"rtsp ring buffer detached"));
strongSelf->shutdown(SockException(Err_shutdown, "rtsp ring buffer detached"));
});
_pRtpReader->setReadCB([weakSelf](const RtspMediaSource::RingDataType &pack) {
auto strongSelf = weakSelf.lock();
if(!strongSelf) {
if (!strongSelf) {
return;
}
if(strongSelf->_enableSendRtp) {
if (strongSelf->_enableSendRtp) {
strongSelf->sendRtpPacket(pack);
}
});
}
};
weak_ptr<RtspSession> weakSelf = dynamic_pointer_cast<RtspSession>(shared_from_this());
Broadcast::AuthInvoker invoker = [weakSelf,onRes](const string &err){
auto strongSelf = weakSelf.lock();
if(!strongSelf){
return;
}
strongSelf->async([weakSelf,onRes,err](){
auto strongSelf = weakSelf.lock();
if(!strongSelf){
return;
}
onRes(err);
});
};
if(_bFirstPlay){
//第一次收到play命令,需要鉴权
auto flag = NoticeCenter::Instance().emitEvent(Broadcast::kBroadcastMediaPlayed,_mediaInfo,invoker,static_cast<SockInfo &>(*this));
if(!flag){
//该事件无人监听,默认不鉴权
onRes("");
}
}else{
//后面是seek或恢复命令,不需要鉴权
onRes("");
}
}
void RtspSession::handleReq_Pause(const Parser &parser) {
......
......@@ -160,6 +160,8 @@ private:
void onAuthBasic(const string &realm,const string &strBase64);
//校验md5方式的认证加密
void onAuthDigest(const string &realm,const string &strMd5);
//触发url鉴权事件
void emitOnPlay();
//发送rtp给客户端
void sendRtpPacket(const RtspMediaSource::RingDataType &pkt);
......@@ -179,8 +181,10 @@ private:
string _strContentBase;
//Session号
string _strSession;
//是否第一次播放,第一次播放需要鉴权,第二次播放属于暂停恢复
bool _bFirstPlay = true;
//记录是否需要rtsp专属鉴权,防止重复触发事件
string _rtsp_realm;
//是否已经触发on_play事件
bool _emit_on_play = false;
//url解析后保存的相关信息
MediaInfo _mediaInfo;
//rtsp播放器绑定的直播源
......
......@@ -10,10 +10,29 @@
#include <cstdlib>
#include "RtspSplitter.h"
#include "Util/logger.h"
#include "Util/util.h"
namespace mediakit{
const char *RtspSplitter::onSearchPacketTail(const char *data, int len) {
auto ret = onSearchPacketTail_l(data, len);
if(ret){
return ret;
}
if (len > 256 * 1024) {
//rtp大于256KB
ret = (char *) memchr(data, '$', len);
if (!ret) {
WarnL << "rtp缓存溢出:" << hexdump(data, 1024);
reset();
}
}
return ret;
}
const char *RtspSplitter::onSearchPacketTail_l(const char *data, int len) {
if(!_enableRecvRtp || data[0] != '$'){
//这是rtsp包
_isRtpPacket = false;
......
......@@ -48,6 +48,7 @@ protected:
virtual int64_t getContentLength(Parser &parser);
protected:
const char *onSearchPacketTail(const char *data,int len) override ;
const char *onSearchPacketTail_l(const char *data,int len) ;
int64_t onRecvHeader(const char *data,uint64_t len) override;
void onRecvContent(const char *data,uint64_t len) override;
private:
......
......@@ -13,6 +13,7 @@
#include <string>
#include <memory>
#include <stdexcept>
#include "Extension/Frame.h"
#ifdef __cplusplus
extern "C" {
#endif
......@@ -27,14 +28,24 @@ using namespace std;
namespace mediakit {
class H264Decoder
{
class FFMpegDecoder{
public:
H264Decoder(void){
FFMpegDecoder(int codec_id){
auto ff_codec_id = AV_CODEC_ID_H264;
switch (codec_id){
case CodecH264:
ff_codec_id = AV_CODEC_ID_H264;
break;
case CodecH265:
ff_codec_id = AV_CODEC_ID_H265;
break;
default:
throw std::invalid_argument("不支持该编码格式");
}
avcodec_register_all();
AVCodec *pCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
AVCodec *pCodec = avcodec_find_decoder(ff_codec_id);
if (!pCodec) {
throw std::runtime_error("未找到H264解码器");
throw std::runtime_error("未找到解码器");
}
m_pContext.reset(avcodec_alloc_context3(pCodec), [](AVCodecContext *pCtx) {
avcodec_close(pCtx);
......@@ -57,7 +68,7 @@ public:
throw std::runtime_error("创建帧缓存失败");
}
}
virtual ~H264Decoder(void){}
virtual ~FFMpegDecoder(void){}
bool inputVideo(unsigned char* data,unsigned int dataSize,uint32_t ui32Stamp,AVFrame **ppFrame){
AVPacket pkt;
av_init_packet(&pkt);
......
......@@ -61,40 +61,6 @@ public:
}
};
// Join a directory path and a file name into file_path ("<path>/<file_name>").
// A '/' separator is inserted only when path is non-empty and does not already
// end with one. The caller must supply a buffer large enough for the result
// (callers in this file use a PATH_MAX-sized buffer).
void get_file_path(const char *path, const char *file_name, char *file_path) {
    strcpy(file_path, path);
    size_t len = strlen(file_path);
    // Guard len > 0: the original indexed file_path[strlen(file_path) - 1]
    // unconditionally, which reads file_path[-1] (undefined behavior) when
    // path is an empty string.
    if (len > 0 && file_path[len - 1] != '/') {
        strcat(file_path, "/");
    }
    strcat(file_path, file_name);
}
// Recursively invoke func on every regular file under path.
// If path is itself a regular file, func is called on it directly.
// Special directory entries ("." and "..", per File::is_special_dir) are
// skipped during traversal.
template <typename FUNC>
void for_each_file(const char *path, FUNC &&func){
    if (File::is_file(path)) {
        func(path);
        return;
    }
    if (!File::is_dir(path)) {
        // Neither a regular file nor a directory; nothing to do.
        return;
    }
    DIR *dir = opendir(path);
    if (dir == NULL) {
        // Fixed: the original called closedir(dir) here with dir == NULL,
        // which is undefined behavior — there is nothing to close on failure.
        return;
    }
    dirent *dir_info;
    char file_path[PATH_MAX];
    while ((dir_info = readdir(dir)) != NULL) {
        if (File::is_special_dir(dir_info->d_name)) {
            continue;
        }
        get_file_path(path, dir_info->d_name, file_path);
        // Recurse passing func as an lvalue: the original used
        // std::forward<FUNC>(func) inside the loop, which can move from the
        // same callable on every iteration when FUNC deduces to a value type.
        for_each_file(file_path, func);
    }
    closedir(dir);
}
static const char s_bom[] = "\xEF\xBB\xBF";
void add_or_rm_bom(const char *file,bool rm_bom){
......@@ -159,23 +125,26 @@ int main(int argc, char *argv[]) {
bool no_filter = filter_set.find("*") != filter_set.end();
//设置日志
Logger::Instance().add(std::make_shared<ConsoleChannel>());
path = File::absolutePath(path, "");
for_each_file(path.data(),[&](const char *path){
if(!no_filter){
File::scanDir(path, [&](const string &path, bool isDir) {
if (isDir) {
return true;
}
if (!no_filter) {
//开启了过滤器
auto pos = strstr(path,".");
if(pos == nullptr){
auto pos = strstr(path.data(), ".");
if (pos == nullptr) {
//没有后缀
return;
return true;
}
auto ext = pos + 1;
if(filter_set.find(ext) == filter_set.end()){
if (filter_set.find(ext) == filter_set.end()) {
//后缀不匹配
return;
return true;
}
}
//该文件匹配
process_file(path,rm_bom);
});
process_file(path.data(), rm_bom);
return true;
}, true);
return 0;
}
......@@ -128,8 +128,6 @@ int main(int argc, char *argv[]) {
bool no_filter = filter_set.find("*") != filter_set.end();
//设置日志
Logger::Instance().add(std::make_shared<ConsoleChannel>());
path = File::absolutePath(path, "");
DebugL << path;
File::scanDir(path, [&](const string &path, bool isDir) {
if (isDir) {
return true;
......
......@@ -44,6 +44,10 @@ int main(int argc, char *argv[]) {
auto playerCnt = atoi(argv[1]);//启动的播放器个数
atomic_int alivePlayerCnt(0);
//由于所有播放器都是再一个timer里面创建的,默认情况下所有播放器会绑定该timer所在的poller线程
//为了提高性能,poller分配策略关闭优先返回当前线程的策略
EventPollerPool::Instance().preferCurrentThread(false);
//每隔若干毫秒启动一个播放器(如果一次性全部启动,服务器和客户端可能都承受不了)
Timer timer0(atoi(argv[2])/1000.0f,[&]() {
MediaPlayer::Ptr player(new MediaPlayer());
......
......@@ -12,13 +12,12 @@
#include "Util/util.h"
#include "Util/logger.h"
#include <iostream>
#include "Poller/EventPoller.h"
#include "Rtsp/UDPServer.h"
#include "Player/MediaPlayer.h"
#include "Util/onceToken.h"
#include "H264Decoder.h"
#include "FFMpegDecoder.h"
#include "YuvDisplayer.h"
#include "Network/sockutil.h"
#include "Extension/H265.h"
using namespace std;
using namespace toolkit;
......@@ -111,36 +110,34 @@ int main(int argc, char *argv[]) {
}
auto viedoTrack = strongPlayer->getTrack(TrackVideo);
if (!viedoTrack || viedoTrack->getCodecId() != CodecH264) {
WarnL << "没有视频或者视频不是264编码!";
if (!viedoTrack) {
WarnL << "没有视频!";
return;
}
AnyStorage::Ptr storage(new AnyStorage);
viedoTrack->addDelegate(std::make_shared<FrameWriterInterfaceHelper>([storage](const Frame::Ptr &frame) {
viedoTrack->addDelegate(std::make_shared<FrameWriterInterfaceHelper>([storage](const Frame::Ptr &frame_in) {
auto frame = Frame::getCacheAbleFrame(frame_in);
SDLDisplayerHelper::Instance().doTask([frame,storage]() {
auto &decoder = (*storage)["decoder"];
auto &displayer = (*storage)["displayer"];
auto &merger = (*storage)["merger"];
if(!decoder){
decoder.set<H264Decoder>();
decoder.set<FFMpegDecoder>(frame->getCodecId());
}
if(!displayer){
displayer.set<YuvDisplayer>(nullptr,url);
}
if(!merger){
merger.set<FrameMerger>();
};
}
merger.get<FrameMerger>().inputFrame(frame,[&](uint32_t dts,uint32_t pts,const Buffer::Ptr &buffer){
AVFrame *pFrame = nullptr;
bool flag = decoder.get<H264Decoder>().inputVideo((unsigned char *) buffer->data(), buffer->size(), dts, &pFrame);
bool flag = decoder.get<FFMpegDecoder>().inputVideo((unsigned char *) buffer->data(), buffer->size(), dts, &pFrame);
if (flag) {
displayer.get<YuvDisplayer>().displayYUV(pFrame);
}
});
return true;
});
}));
......
/*
/*
* Copyright (c) 2016 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
......@@ -58,7 +58,7 @@ static bool loadFile(const char *path){
RtpSelector::Instance().inputRtp(nullptr,rtp,len, &addr,&timeStamp);
if(timeStamp_last){
auto diff = timeStamp - timeStamp_last;
if(diff > 0){
if(diff > 0 && diff < 500){
usleep(diff * 1000);
}
}
......
......@@ -59,10 +59,11 @@ int main(int argc, char *argv[]) {
Logger::Instance().add(std::make_shared<ConsoleChannel>());
Logger::Instance().setWriter(std::make_shared<AsyncLogWriter>());
{
WebSocketClient<EchoTcpClient>::Ptr client = std::make_shared<WebSocketClient<EchoTcpClient> >();
client->startConnect("121.40.165.18",8800);
client->startConnect("127.0.0.1", 80);
sem.wait();
}
return 0;
}
......@@ -96,26 +96,28 @@ int main(int argc, char *argv[]) {
SSL_Initor::Instance().loadCertificate((exeDir() + "ssl.p12").data());
{
TcpServer::Ptr httpSrv(new TcpServer());
//http服务器,支持websocket
httpSrv->start<WebSocketSessionBase<EchoSessionCreator,HttpSession> >(80);//默认80
httpSrv->start<WebSocketSessionBase<EchoSessionCreator, HttpSession> >(80);//默认80
TcpServer::Ptr httpsSrv(new TcpServer());
//https服务器,支持websocket
httpsSrv->start<WebSocketSessionBase<EchoSessionCreator,HttpsSession> >(443);//默认443
httpsSrv->start<WebSocketSessionBase<EchoSessionCreator, HttpsSession> >(443);//默认443
TcpServer::Ptr httpSrvOld(new TcpServer());
//兼容之前的代码(但是不支持根据url选择生成TcpSession类型)
httpSrvOld->start<WebSocketSession<EchoSession,HttpSession> >(8080);
httpSrvOld->start<WebSocketSession<EchoSession, HttpSession> >(8080);
DebugL << "请打开网页:http://www.websocket-test.com/,进行测试";
DebugL << "连接 ws://127.0.0.1/xxxx,ws://127.0.0.1/ 测试的效果将不同,支持根据url选择不同的处理逻辑";
//设置退出信号处理函数
static semaphore sem;
signal(SIGINT, [](int) { sem.post(); });// 设置退出信号
sem.wait();
}
return 0;
}
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论