Commit a4621896 by baiyfcu Committed by GitHub

Merge pull request #17 from xiongziliang/master

update
parents 05a65d49 f881108d
ZLToolKit @ 5030af90
Subproject commit 4ede70fc435eb0a4d3a752b521170d86440b3935
Subproject commit 5030af90126ea8f01ded6744ae8abdf549d00a81
/*
* Copyright (c) 2016 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#ifndef ZLMEDIAKIT_ASSERT_H
#define ZLMEDIAKIT_ASSERT_H
#include <stdio.h>
#ifndef NDEBUG
//Debug build: replace the standard assert macro with a variant that reports the
//failure through Assert_Throw (implemented elsewhere in the project) instead of
//calling abort().
#ifdef assert
#undef assert
#endif//assert
#ifdef __cplusplus
extern "C" {
#endif
//Invoked on every assert() expansion.
//@param failed non-zero when the asserted expression evaluated to false
//@param exp    stringified source text of the expression
//@param func   name of the function containing the assertion
//@param file   source file name
//@param line   source line number
extern void Assert_Throw(int failed, const char *exp, const char *func, const char *file, int line);
#ifdef __cplusplus
}
#endif
//NOTE: no trailing semicolon — the caller already writes `assert(x);`, and a
//semicolon baked into the macro would expand that into TWO statements, breaking
//constructs such as `if (cond) assert(x); else ...`.
#define assert(exp) Assert_Throw(!(exp), #exp, __FUNCTION__, __FILE__, __LINE__)
#else
//Release build: assert compiles away to nothing, matching the standard macro.
#define assert(e) ((void)0)
#endif//NDEBUG
#endif //ZLMEDIAKIT_ASSERT_H
media-server @ 576216c6
Subproject commit abc08f61bb1250b94d252cfeaea249527912dd3b
Subproject commit 576216c64bf3bcdc5e787da2adb3e169bdd97118
......@@ -39,6 +39,7 @@ set(MediaServer_Root ${CMAKE_CURRENT_SOURCE_DIR}/3rdpart/media-server)
#设置头文件目录
INCLUDE_DIRECTORIES(${ToolKit_Root})
INCLUDE_DIRECTORIES(${MediaKit_Root})
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/3rdpart)
set(ENABLE_HLS true)
set(ENABLE_OPENSSL true)
......@@ -57,6 +58,8 @@ if (OPENSSL_FOUND AND ENABLE_OPENSSL)
include_directories(${OPENSSL_INCLUDE_DIR})
add_definitions(-DENABLE_OPENSSL)
list(APPEND LINK_LIB_LIST ${OPENSSL_LIBRARIES})
else()
message(WARNING "openssl未找到,rtmp将不支持flash播放器,https/wss/rtsps/rtmps也将失效")
endif ()
#查找mysql是否安装
......@@ -104,9 +107,9 @@ if(ENABLE_HLS)
message(STATUS "ENABLE_HLS defined")
add_definitions(-DENABLE_HLS)
include_directories(${MediaServer_Root}/libmpeg/include)
aux_source_directory(${MediaServer_Root}/libmpeg/include src_mpeg)
aux_source_directory(${MediaServer_Root}/libmpeg/source src_mpeg)
include_directories(${MediaServer_Root}/libmpeg/include)
add_library(mpeg STATIC ${src_mpeg})
list(APPEND LINK_LIB_LIST mpeg)
......@@ -121,13 +124,14 @@ if(ENABLE_MP4)
message(STATUS "ENABLE_MP4 defined")
add_definitions(-DENABLE_MP4)
include_directories(${MediaServer_Root}/libmov/include)
include_directories(${MediaServer_Root}/libflv/include)
aux_source_directory(${MediaServer_Root}/libmov/include src_mov)
aux_source_directory(${MediaServer_Root}/libmov/source src_mov)
include_directories(${MediaServer_Root}/libmov/include)
aux_source_directory(${MediaServer_Root}/libflv/include src_flv)
aux_source_directory(${MediaServer_Root}/libflv/source src_flv)
include_directories(${MediaServer_Root}/libflv/include)
add_library(mov STATIC ${src_mov})
add_library(flv STATIC ${src_flv})
......@@ -141,10 +145,11 @@ endif()
#添加rtp库用于rtp转ps/ts
if(ENABLE_RTPPROXY AND ENABLE_HLS)
message(STATUS "ENABLE_RTPPROXY defined")
include_directories(${MediaServer_Root}/librtp/include)
aux_source_directory(${MediaServer_Root}/librtp/include src_rtp)
aux_source_directory(${MediaServer_Root}/librtp/source src_rtp)
aux_source_directory(${MediaServer_Root}/librtp/payload src_rtp)
include_directories(${MediaServer_Root}/librtp/include)
add_library(rtp STATIC ${src_rtp})
add_definitions(-DENABLE_RTPPROXY)
list(APPEND LINK_LIB_LIST rtp)
......
![logo](https://raw.githubusercontent.com/zlmediakit/ZLMediaKit/master/logo.png)
![logo](https://raw.githubusercontent.com/zlmediakit/ZLMediaKit/master/www/logo.png)
[english readme](https://github.com/xiongziliang/ZLMediaKit/blob/master/README_en.md)
......@@ -30,22 +30,21 @@
## 功能清单
- RTSP
- RTSP 服务器,支持RTMP/MP4转RTSP
- RTSPS 服务器,支持亚马逊echo show这样的设备
- RTSP 播放器,支持RTSP代理,支持生成静音音频
- RTSP 推流客户端与服务器
- RTSP[S]
- RTSP[S] 服务器,支持RTMP/MP4/HLS转RTSP[S],支持亚马逊echo show这样的设备
- RTSP[S] 播放器,支持RTSP代理,支持生成静音音频
- RTSP[S] 推流客户端与服务器
- 支持 `rtp over udp` `rtp over tcp` `rtp over http` `rtp组播` 四种RTP传输方式
- 服务器/客户端完整支持Basic/Digest方式的登录鉴权,全异步可配置化的鉴权接口
- 支持H265编码
- 服务器支持RTSP推流(包括`rtp over udp` `rtp over tcp`方式)
- 支持任意编码格式的rtsp推流,只是除H264/H265/AAC/G711外无法转协议
- RTMP
- RTMP 播放服务器,支持RTSP/MP4转RTMP
- RTMP 发布服务器,支持录制发布流
- RTMP 播放器,支持RTMP代理,支持生成静音音频
- RTMP 推流客户端
- RTMP[S]
- RTMP[S] 播放服务器,支持RTSP/MP4/HLS转RTMP
- RTMP[S] 发布服务器,支持录制发布流
- RTMP[S] 播放器,支持RTMP代理,支持生成静音音频
- RTMP[S] 推流客户端
- 支持http[s]-flv直播
- 支持websocket-flv直播
- 支持任意编码格式的rtmp推流,只是除H264/H265/AAC/G711外无法转协议
......@@ -55,6 +54,7 @@
- 支持HLS文件生成,自带HTTP文件服务器
- 通过cookie追踪技术,可以模拟HLS播放为长连接,实现丰富的业务逻辑
- 支持完备的HLS用户追踪、播放统计等业务功能,可以实现HLS按需拉流等业务
- 支持HLS播发器,支持拉流HLS转rtsp/rtmp/mp4
- HTTP[S]
- 服务器支持`目录索引生成`,`文件下载`,`表单提交请求`
......@@ -71,7 +71,7 @@
- 点播
- 支持录制为FLV/HLS/MP4
- RTSP/RTMP/HTTP-FLV/WS-FLV支持MP4文件点播,支持seek
- 其他
- 支持丰富的restful api以及web hook事件
- 支持简单的telnet调试
......@@ -81,11 +81,15 @@
- 支持按需拉流,无人观看自动关断拉流
- 支持先拉流后推流,提高及时推流画面打开率
- 提供c api sdk
- 支持FFmpeg拉流代理任意格式的流
- 支持http api生成并返回实时截图
## 更新日志
- 2020/5/17 新增支持hls播发器,支持hls拉流代理
## 编译以及测试
请参考wiki:[快速开始](https://github.com/xiongziliang/ZLMediaKit/wiki/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B)
**编译前务必仔细参考wiki:[快速开始](https://github.com/xiongziliang/ZLMediaKit/wiki/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B)操作!!!**
## 怎么使用
......@@ -114,8 +118,12 @@ bash build_docker_images.sh
- [IOS摄像头实时录制,生成rtsp/rtmp/hls/http-flv](https://gitee.com/xiahcu/IOSMedia)
- [IOS rtmp/rtsp播放器,视频推流器](https://gitee.com/xiahcu/IOSPlayer)
- [支持linux、windows、mac的rtmp/rtsp播放器](https://github.com/xiongziliang/ZLMediaPlayer)
- [配套的管理WEB网站](https://github.com/chenxiaolei/ZLMediaKit_NVR_UI)
- [基于ZLMediaKit分支的管理WEB网站](https://github.com/chenxiaolei/ZLMediaKit_NVR_UI)
- [基于ZLMediaKit主线的管理WEB网站](https://gitee.com/kkkkk5G/MediaServerUI)
- [DotNetCore的RESTful客户端](https://github.com/MingZhuLiu/ZLMediaKit.DotNetCore.Sdk)
- [GB28181-2016网络视频平台](https://github.com/swwheihei/wvp)
- [node-js版本的GB28181平台](https://gitee.com/hfwudao/GB28181_Node_Http)
## 授权协议
......
![logo](https://raw.githubusercontent.com/zlmediakit/ZLMediaKit/master/logo.png)
![logo](https://raw.githubusercontent.com/zlmediakit/ZLMediaKit/master/www/logo.png)
# A lightweight, high-performance and stable stream server and client framework based on C++11.
......@@ -15,18 +15,18 @@
## Features
- RTSP
- RTSP[S]
- RTSP[S] server,support rtsp push.
- RTSP player and pusher.
- RTSP[S] player and pusher.
- RTP Transport : `rtp over udp` `rtp over tcp` `rtp over http` `rtp udp multicast` .
- Basic/Digest/Url Authentication.
- H264/H265/AAC/G711 codec.
- Recorded as mp4.
- Vod of mp4.
- RTMP
- RTMP server,support player and pusher.
- RTMP player and pusher.
- RTMP[S]
- RTMP[S] server,support player and pusher.
- RTMP[S] player and pusher.
- Support HTTP-FLV player.
- H264/H265/AAC/G711 codec.
- Recorded as flv or mp4.
......@@ -36,6 +36,7 @@
- HLS
- RTSP/RTMP can be converted into HLS, with a built-in HTTP server.
- Play authentication based on cookie.
- Support HLS player, support streaming HLS proxy to RTSP / RTMP / MP4.
- HTTP[S]
- HTTP server, supports directory menu and RESTful http api.
......@@ -53,6 +54,7 @@
- Play and push authentication.
- Pull stream on Demand.
- Support TS / PS streaming push through RTP,and it can be converted to RTSP / RTMP / HLS / FLV.
- Support real-time online screenshot http api.
- Protocol conversion:
......@@ -67,6 +69,7 @@
| RTMP --> MP4 | Y | Y | Y | N |
| MP4 --> RTSP[S] | Y | Y | Y | N |
| MP4 --> RTMP | Y | Y | Y | N |
| HLS --> RTSP/RTMP/MP4 | Y | Y | Y | N |
- Stream generation:
......@@ -106,7 +109,7 @@
| RTMP Pusher | Y |
| HTTP[S] | Y |
| WebSocket[S] | Y |
| HLS player | Y |
## System Requirements
......
......@@ -17,12 +17,12 @@
#ifndef MediaKitApi_STATIC
#if defined(MediaKitApi_EXPORTS)
#define API_EXPORT __declspec(dllexport)
#else
#define API_EXPORT __declspec(dllimport)
#endif
#define API_EXPORT __declspec(dllexport)
#else
#define API_EXPORT __declspec(dllimport)
#endif
#define API_CALL __cdecl
#define API_CALL __cdecl
#else
#define API_EXPORT
#define API_CALL
......
......@@ -36,6 +36,22 @@ API_EXPORT mk_thread API_CALL mk_thread_from_tcp_session(mk_tcp_session ctx);
*/
API_EXPORT mk_thread API_CALL mk_thread_from_tcp_client(mk_tcp_client ctx);
/**
* 根据负载均衡算法,从事件线程池中随机获取一个事件线程
* 如果在事件线程内执行此函数将返回本事件线程
* 事件线程指的是定时器、网络io事件线程
* @return 事件线程
*/
API_EXPORT mk_thread API_CALL mk_thread_from_pool();
/**
* 根据负载均衡算法,从后台线程池中随机获取一个线程
* 后台线程本质与事件线程相同,只是优先级更低,同时可以执行短时间的阻塞任务
* ZLMediaKit中后台线程用于dns解析、mp4点播时的文件解复用
* @return 后台线程
*/
API_EXPORT mk_thread API_CALL mk_thread_from_pool_work();
///////////////////////////////////////////线程切换/////////////////////////////////////////////
typedef void (API_CALL *on_mk_async)(void *user_data);
......
......@@ -144,10 +144,12 @@ API_EXPORT uint16_t API_CALL mk_tcp_server_start(uint16_t port, mk_tcp_type type
s_tcp_server[type]->start<TcpSessionWithSSL<TcpSessionForC> >(port);
break;
case mk_type_ws:
s_tcp_server[type]->start<WebSocketSession<TcpSessionForC, HttpSession>>(port);
//此处你也可以修改WebSocketHeader::BINARY
s_tcp_server[type]->start<WebSocketSession<TcpSessionForC, HttpSession, WebSocketHeader::TEXT> >(port);
break;
case mk_type_wss:
s_tcp_server[type]->start<WebSocketSession<TcpSessionForC, HttpsSession>>(port);
//此处你也可以修改WebSocketHeader::BINARY
s_tcp_server[type]->start<WebSocketSession<TcpSessionForC, HttpsSession, WebSocketHeader::TEXT> >(port);
break;
default:
return 0;
......@@ -208,8 +210,10 @@ TcpClientForC::Ptr *mk_tcp_client_create_l(mk_tcp_client_events *events, mk_tcp_
case mk_type_ssl:
return (TcpClientForC::Ptr *)new shared_ptr<TcpSessionWithSSL<TcpClientForC> >(new TcpSessionWithSSL<TcpClientForC>(events));
case mk_type_ws:
//此处你也可以修改WebSocketHeader::BINARY
return (TcpClientForC::Ptr *)new shared_ptr<WebSocketClient<TcpClientForC, WebSocketHeader::TEXT, false> >(new WebSocketClient<TcpClientForC, WebSocketHeader::TEXT, false>(events));
case mk_type_wss:
//此处你也可以修改WebSocketHeader::BINARY
return (TcpClientForC::Ptr *)new shared_ptr<WebSocketClient<TcpClientForC, WebSocketHeader::TEXT, true> >(new WebSocketClient<TcpClientForC, WebSocketHeader::TEXT, true>(events));
default:
return nullptr;
......
......@@ -12,6 +12,7 @@
#include "mk_tcp_private.h"
#include "Util/logger.h"
#include "Poller/EventPoller.h"
#include "Thread/WorkThreadPool.h"
using namespace std;
using namespace toolkit;
......@@ -27,6 +28,14 @@ API_EXPORT mk_thread API_CALL mk_thread_from_tcp_client(mk_tcp_client ctx){
return (*client)->getPoller().get();
}
//Pick an event-poller thread from the global EventPollerPool (load-balanced);
//per the API header docs, calling this from inside an event thread returns
//that same thread.
API_EXPORT mk_thread API_CALL mk_thread_from_pool(){
return EventPollerPool::Instance().getPoller().get();
}
//Pick a background worker poller from the global WorkThreadPool. Background
//threads have lower priority than event threads and may run short blocking
//tasks (the project uses them for dns resolution and mp4 vod demuxing).
API_EXPORT mk_thread API_CALL mk_thread_from_pool_work(){
return WorkThreadPool::Instance().getPoller().get();
}
API_EXPORT void API_CALL mk_async_do(mk_thread ctx,on_mk_async cb, void *user_data){
assert(ctx && cb);
EventPoller *poller = (EventPoller *)ctx;
......
......@@ -4,12 +4,18 @@ apiDebug=1
#一些比较敏感的http api在访问时需要提供secret,否则无权限调用
#如果是通过127.0.0.1访问,那么可以不提供secret
secret=035c73f7-bb6b-4889-a715-d9eb2d1925cc
#截图保存路径根目录,截图通过http api(/index/api/getSnap)生成和获取
snapRoot=./www/snap/
#默认截图图片,在启动FFmpeg截图后但是截图还未生成时,可以返回默认的预设图片
defaultSnap=./www/logo.png
[ffmpeg]
#FFmpeg可执行程序绝对路径
bin=/usr/local/bin/ffmpeg
#FFmpeg拉流再推流的命令模板,通过该模板可以设置再编码的一些参数
cmd=%s -re -i %s -c:a aac -strict -2 -ar 44100 -ab 48k -c:v libx264 -f flv %s
#FFmpeg生成截图的命令,可以通过修改该配置改变截图分辨率或质量
snap=%s -i %s -y -f mjpeg -t 0.001 %s
#FFmpeg日志的路径,如果置空则不生成FFmpeg日志
#可以为相对(相对于本可执行程序目录)或绝对路径
log=./ffmpeg/ffmpeg.log
......@@ -43,6 +49,11 @@ publishToMP4=0
#合并写缓存大小(单位毫秒),合并写指服务器缓存一定的数据后才会一次性写入socket,这样能提高性能,但是会提高延时
#开启后会同时关闭TCP_NODELAY并开启MSG_MORE
mergeWriteMS=0
#全局的时间戳覆盖开关,在转协议时,对frame进行时间戳覆盖
#该开关对rtsp/rtmp/rtp推流、rtsp/rtmp/hls拉流代理转协议时生效
#会直接影响rtsp/rtmp/hls/mp4/flv等协议的时间戳
#同协议情况下不影响(例如rtsp/rtmp推流,那么播放rtsp/rtmp时不会影响时间戳)
modifyStamp=0
[hls]
#hls写文件的buf大小,调整参数可以提高文件io性能
......@@ -76,8 +87,9 @@ on_publish=https://127.0.0.1/index/hook/on_publish
on_record_mp4=https://127.0.0.1/index/hook/on_record_mp4
#rtsp播放鉴权事件,此事件中比对rtsp的用户名密码
on_rtsp_auth=https://127.0.0.1/index/hook/on_rtsp_auth
#rtsp播放是否开启鉴权事件,置空则关闭rtsp鉴权。rtsp播放鉴权还支持url方式鉴权
#rtsp播放是否开启专属鉴权事件,置空则关闭rtsp鉴权。rtsp播放鉴权还支持url方式鉴权
#建议开发者统一采用url参数方式鉴权,rtsp用户名密码鉴权一般在设备上用的比较多
#开启rtsp专属鉴权后,将不再触发on_play鉴权事件
on_rtsp_realm=https://127.0.0.1/index/hook/on_rtsp_realm
#远程telnet调试鉴权事件
on_shell_login=https://127.0.0.1/index/hook/on_shell_login
......
......@@ -13,26 +13,27 @@
#include "Common/MediaSource.h"
#include "Util/File.h"
#include "System.h"
#include "Thread/WorkThreadPool.h"
namespace FFmpeg {
#define FFmpeg_FIELD "ffmpeg."
const string kBin = FFmpeg_FIELD"bin";
const string kCmd = FFmpeg_FIELD"cmd";
const string kLog = FFmpeg_FIELD"log";
const string kSnap = FFmpeg_FIELD"snap";
onceToken token([]() {
#ifdef _WIN32
string ffmpeg_bin = System::execute("where ffmpeg");
//windows下先关闭FFmpeg日志(目前不支持日志重定向)
mINI::Instance()[kCmd] = "%s -re -i \"%s\" -loglevel quiet -c:a aac -strict -2 -ar 44100 -ab 48k -c:v libx264 -f flv %s ";
string ffmpeg_bin = trim(System::execute("where ffmpeg"));
#else
string ffmpeg_bin = System::execute("which ffmpeg");
mINI::Instance()[kCmd] = "%s -re -i \"%s\" -c:a aac -strict -2 -ar 44100 -ab 48k -c:v libx264 -f flv %s ";
string ffmpeg_bin = trim(System::execute("which ffmpeg"));
#endif
//默认ffmpeg命令路径为环境变量中路径
mINI::Instance()[kBin] = ffmpeg_bin.empty() ? "ffmpeg" : ffmpeg_bin;
//ffmpeg日志保存路径
mINI::Instance()[kLog] = "./ffmpeg/ffmpeg.log";
mINI::Instance()[kCmd] = "%s -re -i %s -c:a aac -strict -2 -ar 44100 -ab 48k -c:v libx264 -f flv %s";
mINI::Instance()[kSnap] = "%s -i %s -y -f mjpeg -t 0.001 %s";
});
}
......@@ -114,8 +115,7 @@ void FFmpegSource::findAsync(int maxWaitMS, const function<void(const MediaSourc
auto src = MediaSource::find(_media_info._schema,
_media_info._vhost,
_media_info._app,
_media_info._streamid,
false);
_media_info._streamid);
if(src || !maxWaitMS){
cb(src);
return;
......@@ -196,7 +196,19 @@ void FFmpegSource::startTimer(int timeout_ms) {
//推流给其他服务器的,我们通过判断FFmpeg进程是否在线,如果FFmpeg推流中断,那么它应该会自动退出
if (!strongSelf->_process.wait(false)) {
//ffmpeg不在线,重新拉流
strongSelf->play(strongSelf->_src_url, strongSelf->_dst_url, timeout_ms, [](const SockException &) {});
strongSelf->play(strongSelf->_src_url, strongSelf->_dst_url, timeout_ms, [weakSelf](const SockException &ex) {
if(!ex){
//没有错误
return;
}
auto strongSelf = weakSelf.lock();
if (!strongSelf) {
//自身已经销毁
return;
}
//上次重试时间超过10秒,那么再重试FFmpeg拉流
strongSelf->startTimer(10 * 1000);
});
}
}
return true;
......@@ -232,3 +244,31 @@ void FFmpegSource::onGetMediaSource(const MediaSource::Ptr &src) {
_listener = src->getListener();
src->setListener(shared_from_this());
}
//Generate a snapshot (jpeg) of the given url by spawning an FFmpeg process.
//@param play_url    url to snapshot; anything FFmpeg can open
//@param save_path   destination path of the generated jpeg file
//@param timeout_sec how long FFmpeg may run before it is killed
//@param cb          invoked with true iff FFmpeg exited with code 0
void FFmpegSnap::makeSnap(const string &play_url, const string &save_path, float timeout_sec, const function<void(bool)> &cb) {
GET_CONFIG(string,ffmpeg_bin,FFmpeg::kBin);
GET_CONFIG(string,ffmpeg_snap,FFmpeg::kSnap);
GET_CONFIG(string,ffmpeg_log,FFmpeg::kLog);
std::shared_ptr<Process> process = std::make_shared<Process>();
//watchdog timer: if FFmpeg is still running when the timeout fires, kill it
auto delayTask = EventPollerPool::Instance().getPoller()->doDelayTask(timeout_sec * 1000,[process,cb](){
if(process->wait(false)){
//the FFmpeg process is still running after the timeout, shut it down
process->kill(2000);
}
return 0;
});
//run FFmpeg on a background worker thread, since wait(true) below blocks
WorkThreadPool::Instance().getPoller()->async([process,play_url,save_path,delayTask,cb](){
char cmd[1024] = {0};
//ffmpeg_snap is a printf-style command template: "<bin> -i <url> ... <path>"
snprintf(cmd, sizeof(cmd),ffmpeg_snap.data(),ffmpeg_bin.data(),play_url.data(),save_path.data());
process->run(cmd,ffmpeg_log.empty() ? "" : File::absolutePath("",ffmpeg_log));
//block until the FFmpeg process exits
process->wait(true);
//FFmpeg has exited, so the watchdog timer can be cancelled
delayTask->cancel();
//report success only on a clean (exit code 0) FFmpeg exit
cb(process->exit_code() == 0);
});
}
......@@ -23,6 +23,23 @@ using namespace std;
using namespace toolkit;
using namespace mediakit;
namespace FFmpeg {
extern const string kSnap;
}
class FFmpegSnap {
public:
/// 创建截图
/// \param play_url 播放url地址,只要FFmpeg支持即可
/// \param save_path 截图jpeg文件保存路径
/// \param timeout_sec 生成截图超时时间(防止阻塞太久)
/// \param cb 生成截图成功与否回调
static void makeSnap(const string &play_url, const string &save_path, float timeout_sec, const function<void(bool)> &cb);
private:
FFmpegSnap() = delete;
~FFmpegSnap() = delete;
};
class FFmpegSource : public std::enable_shared_from_this<FFmpegSource> , public MediaSourceEvent{
public:
typedef shared_ptr<FFmpegSource> Ptr;
......
......@@ -31,6 +31,7 @@ public:
int exit_code();
private:
pid_t _pid = -1;
void *_handle = nullptr;
int _exit_code = 0;
};
......
......@@ -52,7 +52,7 @@ string System::execute(const string &cmd) {
#if !defined(ANDROID) && !defined(_WIN32)
static string addr2line(const string &address) {
string cmd = StrPrinter << "addr2line -e " << exePath() << " " << address;
string cmd = StrPrinter << "addr2line -C -f -e " << exePath() << " " << address;
return System::execute(cmd);
}
......
......@@ -8,11 +8,12 @@
* may be found in the AUTHORS file in the root of the source tree.
*/
#include <sys/stat.h>
#include <math.h>
#include <signal.h>
#include <functional>
#include <sstream>
#include <unordered_map>
#include <math.h>
#include "jsoncpp/json.h"
#include "Util/util.h"
#include "Util/logger.h"
......@@ -50,10 +51,14 @@ typedef enum {
#define API_FIELD "api."
const string kApiDebug = API_FIELD"apiDebug";
const string kSecret = API_FIELD"secret";
const string kSnapRoot = API_FIELD"snapRoot";
const string kDefaultSnap = API_FIELD"defaultSnap";
static onceToken token([]() {
mINI::Instance()[kApiDebug] = "1";
mINI::Instance()[kSecret] = "035c73f7-bb6b-4889-a715-d9eb2d1925cc";
mINI::Instance()[kSnapRoot] = "./www/snap/";
mINI::Instance()[kDefaultSnap] = "./www/logo.png";
});
}//namespace API
......@@ -145,7 +150,6 @@ static inline void addHttpListener(){
NoticeCenter::Instance().addListener(nullptr, Broadcast::kBroadcastHttpRequest, [](BroadcastHttpRequestArgs) {
auto it = s_map_api.find(parser.Url());
if (it == s_map_api.end()) {
consumed = false;
return;
}
//该api已被消费
......@@ -174,7 +178,7 @@ static inline void addHttpListener(){
size = body->remainSize();
}
if(size < 4 * 1024){
if(size && size < 4 * 1024){
string contentOut = body->readData(size)->toString();
DebugL << "\r\n# request:\r\n" << parser.Method() << " " << parser.FullUrl() << "\r\n"
<< "# content:\r\n" << parser.Content() << "\r\n"
......@@ -436,14 +440,14 @@ void installWebApi() {
api_regist1("/index/api/isMediaOnline",[](API_ARGS1){
CHECK_SECRET();
CHECK_ARGS("schema","vhost","app","stream");
val["online"] = (bool) (MediaSource::find(allArgs["schema"],allArgs["vhost"],allArgs["app"],allArgs["stream"],false));
val["online"] = (bool) (MediaSource::find(allArgs["schema"],allArgs["vhost"],allArgs["app"],allArgs["stream"]));
});
//测试url http://127.0.0.1/index/api/getMediaInfo?schema=rtsp&vhost=__defaultVhost__&app=live&stream=obs
api_regist1("/index/api/getMediaInfo",[](API_ARGS1){
CHECK_SECRET();
CHECK_ARGS("schema","vhost","app","stream");
auto src = MediaSource::find(allArgs["schema"],allArgs["vhost"],allArgs["app"],allArgs["stream"],false);
auto src = MediaSource::find(allArgs["schema"],allArgs["vhost"],allArgs["app"],allArgs["stream"]);
if(!src){
val["online"] = false;
return;
......@@ -817,6 +821,78 @@ void installWebApi() {
val["data"]["paths"] = paths;
});
//Helper shared by the snapshot api: replies to the http client with a snapshot
//image file. If the snapshot file is missing/empty and a default image is
//configured, the default image is returned instead.
static auto responseSnap = [](const string &snap_path,
const HttpSession::KeyValue &headerIn,
const HttpSession::HttpResponseInvoker &invoker) {
StrCaseMap headerOut;
struct stat statbuf = {0};
GET_CONFIG(string, defaultSnap, API::kDefaultSnap);
if (!(stat(snap_path.data(), &statbuf) == 0 && statbuf.st_size != 0) && !defaultSnap.empty()) {
//the snapshot file is empty/missing and a preset image is configured:
//return the preset picture (shown while FFmpeg has not finished the snapshot)
//NOTE(review): const_cast assigns through a const& parameter — defined only
//because callers pass a non-const string; taking the path by value would be safer
const_cast<string&>(snap_path) = File::absolutePath(defaultSnap, "");
headerOut["Content-Type"] = HttpFileManager::getContentType(snap_path.data());
} else {
//previously generated snapshot files are assumed to be jpeg
headerOut["Content-Type"] = HttpFileManager::getContentType(".jpeg");
}
//send the image back to the http client
invoker.responseFile(headerIn, headerOut, snap_path);
};
//Return a cached snapshot, or generate one on the fly via FFmpeg.
//example: http://127.0.0.1/index/api/getSnap?url=rtmp://127.0.0.1/record/robot.mp4&timeout_sec=10&expire_sec=3
api_regist2("/index/api/getSnap", [](API_ARGS2){
CHECK_SECRET();
CHECK_ARGS("url", "timeout_sec", "expire_sec");
GET_CONFIG(string, snap_root, API::kSnapRoot);
int expire_sec = allArgs["expire_sec"];
//snapshots are cached per-url under snap_root/<md5(url)>/<timestamp>.jpeg
auto scan_path = File::absolutePath(MD5(allArgs["url"]).hexdigest(), snap_root) + "/";
string snap_path;
File::scanDir(scan_path, [&](const string &path, bool isDir) {
if (isDir) {
//skip directories
return true;
}
//found a snapshot file; its name is the creation timestamp
auto tm = FindField(path.data() + scan_path.size(), nullptr, ".jpeg");
if (atoll(tm.data()) + expire_sec < time(NULL)) {
//the snapshot has expired: delete it, a new one is generated below
File::delete_file(path.data());
return true;
}
//snapshot still valid: stop scanning and return the cached file
snap_path = path;
return false;
});
if(!snap_path.empty()){
responseSnap(snap_path, headerIn, invoker);
return;
}
//no snapshot cached, or it has expired — generate a new one
snap_path = StrPrinter << scan_path << time(NULL) << ".jpeg";
//create an empty placeholder file first: this creates the directory path and
//prevents repeated api calls from launching duplicate FFmpeg snapshot jobs
//while the snapshot is still being generated
auto file = File::create_file(snap_path.data(), "wb");
if (file) {
fclose(file);
}
//launch the FFmpeg process to take the snapshot
FFmpegSnap::makeSnap(allArgs["url"],snap_path,allArgs["timeout_sec"],[invoker,headerIn,snap_path](bool success){
if(!success){
//snapshot generation failed; remove the possibly-leftover empty file
File::delete_file(snap_path.data());
}
responseSnap(snap_path, headerIn, invoker);
});
});
////////////以下是注册的Hook API////////////
api_regist1("/index/hook/on_publish",[](API_ARGS1){
//开始推流事件
......
......@@ -78,14 +78,6 @@ void DevChannel::inputH264(const char *data, int len, uint32_t dts, uint32_t pts
if(pts == 0){
pts = dts;
}
int prefixeSize;
if (memcmp("\x00\x00\x00\x01", data, 4) == 0) {
prefixeSize = 4;
} else if (memcmp("\x00\x00\x01", data, 3) == 0) {
prefixeSize = 3;
} else {
prefixeSize = 0;
}
//由于rtmp/hls/mp4需要缓存时间戳相同的帧,
//所以使用FrameNoCacheAble类型的帧反而会在转换成FrameCacheAble时多次内存拷贝
......@@ -93,9 +85,8 @@ void DevChannel::inputH264(const char *data, int len, uint32_t dts, uint32_t pts
H264Frame::Ptr frame = std::make_shared<H264Frame>();
frame->_dts = dts;
frame->_pts = pts;
frame->_buffer.assign("\x00\x00\x00\x01",4);
frame->_buffer.append(data + prefixeSize, len - prefixeSize);
frame->_prefix_size = 4;
frame->_buffer.assign(data, len);
frame->_prefix_size = prefixSize(data,len);
inputFrame(frame);
}
......@@ -106,14 +97,6 @@ void DevChannel::inputH265(const char *data, int len, uint32_t dts, uint32_t pts
if(pts == 0){
pts = dts;
}
int prefixeSize;
if (memcmp("\x00\x00\x00\x01", data, 4) == 0) {
prefixeSize = 4;
} else if (memcmp("\x00\x00\x01", data, 3) == 0) {
prefixeSize = 3;
} else {
prefixeSize = 0;
}
//由于rtmp/hls/mp4需要缓存时间戳相同的帧,
//所以使用FrameNoCacheAble类型的帧反而会在转换成FrameCacheAble时多次内存拷贝
......@@ -121,9 +104,8 @@ void DevChannel::inputH265(const char *data, int len, uint32_t dts, uint32_t pts
H265Frame::Ptr frame = std::make_shared<H265Frame>();
frame->_dts = dts;
frame->_pts = pts;
frame->_buffer.assign("\x00\x00\x00\x01",4);
frame->_buffer.append(data + prefixeSize, len - prefixeSize);
frame->_prefix_size = 4;
frame->_buffer.assign(data, len);
frame->_prefix_size = prefixSize(data,len);
inputFrame(frame);
}
......@@ -163,7 +145,9 @@ void DevChannel::inputG711(const char *data, int len, uint32_t dts){
if (dts == 0) {
dts = (uint32_t)_aTicker[1].elapsedTime();
}
inputFrame(std::make_shared<G711FrameNoCacheAble>(_audio->codecId, (char*)data, len, dts, 0));
auto frame = std::make_shared<G711FrameNoCacheAble>((char*)data, len, dts, 0);
frame->setCodec(_audio->codecId);
inputFrame(frame);
}
void DevChannel::initVideo(const VideoInfo &info) {
......
......@@ -43,9 +43,9 @@ public:
class AudioInfo {
public:
CodecId codecId = CodecAAC;
int iChannel;
int iSampleBit;
int iSampleRate;
int iChannel;
int iSampleBit;
int iSampleRate;
};
/**
......
......@@ -180,9 +180,8 @@ static void eraseIfEmpty(MAP &map, IT0 it0, IT1 it1, IT2 it2) {
}
};
void findAsync_l(const MediaInfo &info, const std::shared_ptr<TcpSession> &session, bool retry,
const function<void(const MediaSource::Ptr &src)> &cb){
auto src = MediaSource::find(info._schema, info._vhost, info._app, info._streamid, true);
void MediaSource::findAsync_l(const MediaInfo &info, const std::shared_ptr<TcpSession> &session, bool retry, const function<void(const MediaSource::Ptr &src)> &cb){
auto src = MediaSource::find_l(info._schema, info._vhost, info._app, info._streamid, true);
if(src || !retry){
cb(src);
return;
......@@ -248,7 +247,11 @@ void MediaSource::findAsync(const MediaInfo &info, const std::shared_ptr<TcpSess
return findAsync_l(info, session, true, cb);
}
MediaSource::Ptr MediaSource::find(const string &schema, const string &vhost_tmp, const string &app, const string &id, bool bMake) {
MediaSource::Ptr MediaSource::find(const string &schema, const string &vhost, const string &app, const string &id) {
return find_l(schema, vhost, app, id, false);
}
MediaSource::Ptr MediaSource::find_l(const string &schema, const string &vhost_tmp, const string &app, const string &id, bool bMake) {
string vhost = vhost_tmp;
if(vhost.empty()){
vhost = DEFAULT_VHOST;
......@@ -419,12 +422,10 @@ void MediaSourceEvent::onNoneReader(MediaSource &sender){
//如果mp4点播, 无人观看时我们强制关闭点播
bool is_mp4_vod = sender.getApp() == recordApp;
//无人观看mp4点播时,3秒后自动关闭
auto close_delay = is_mp4_vod ? 3.0 : stream_none_reader_delay / 1000.0;
//没有任何人观看该视频源,表明该源可以关闭了
weak_ptr<MediaSource> weakSender = sender.shared_from_this();
_async_close_timer = std::make_shared<Timer>(close_delay, [weakSender,is_mp4_vod]() {
_async_close_timer = std::make_shared<Timer>(stream_none_reader_delay / 1000.0, [weakSender,is_mp4_vod]() {
auto strongSender = weakSender.lock();
if (!strongSender) {
//对象已经销毁
......@@ -467,7 +468,7 @@ MediaSource::Ptr MediaSource::createFromMP4(const string &schema, const string &
try {
MP4Reader::Ptr pReader(new MP4Reader(vhost, app, stream, filePath));
pReader->startReadMP4();
return MediaSource::find(schema, vhost, app, stream, false);
return MediaSource::find(schema, vhost, app, stream);
} catch (std::exception &ex) {
WarnL << ex.what();
return nullptr;
......@@ -478,57 +479,51 @@ MediaSource::Ptr MediaSource::createFromMP4(const string &schema, const string &
#endif //ENABLE_MP4
}
static bool isFlushAble_default(bool is_audio, uint32_t last_stamp, uint32_t new_stamp, int cache_size) {
if (new_stamp < last_stamp) {
//时间戳回退(可能seek中)
static bool isFlushAble_default(bool is_video, uint32_t last_stamp, uint32_t new_stamp, int cache_size) {
if (new_stamp + 500 < last_stamp) {
//时间戳回退比较大(可能seek中),由于rtp中时间戳是pts,是可能存在一定程度的回退的
return true;
}
if (!is_audio) {
//这是视频,时间戳发送变化或者缓存超过1024个
return last_stamp != new_stamp || cache_size >= 1024;
}
//这是音频,缓存超过100ms或者缓存个数超过10个
return new_stamp > last_stamp + 100 || cache_size > 10;
//时间戳发送变化或者缓存超过1024个,sendmsg接口一般最多只能发送1024个数据包
return last_stamp != new_stamp || cache_size >= 1024;
}
static bool isFlushAble_merge(bool is_audio, uint32_t last_stamp, uint32_t new_stamp, int cache_size, int merge_ms) {
if (new_stamp < last_stamp) {
//时间戳回退(可能seek中)
static bool isFlushAble_merge(bool is_video, uint32_t last_stamp, uint32_t new_stamp, int cache_size, int merge_ms) {
if (new_stamp + 500 < last_stamp) {
//时间戳回退比较大(可能seek中),由于rtp中时间戳是pts,是可能存在一定程度的回退的
return true;
}
if(new_stamp > last_stamp + merge_ms){
if (new_stamp > last_stamp + merge_ms) {
//时间戳增量超过合并写阈值
return true;
}
if (!is_audio) {
//这是视频,缓存数超过1024个,这个逻辑用于避免时间戳异常的流导致的内存暴增问题
//而且sendmsg接口一般最多只能发送1024个数据包
return cache_size >= 1024;
}
//这是音频,音频缓存超过20个
return cache_size > 20;
//缓存数超过1024个,这个逻辑用于避免时间戳异常的流导致的内存暴增问题
//而且sendmsg接口一般最多只能发送1024个数据包
return cache_size >= 1024;
}
bool FlushPolicy::isFlushAble(uint32_t new_stamp, int cache_size) {
bool ret = false;
GET_CONFIG(int, mergeWriteMS, General::kMergeWriteMS);
if (mergeWriteMS <= 0) {
//关闭了合并写或者合并写阈值小于等于0
ret = isFlushAble_default(_is_audio, _last_stamp, new_stamp, cache_size);
bool FlushPolicy::isFlushAble(bool is_video, bool is_key, uint32_t new_stamp, int cache_size) {
bool flush_flag = false;
if (is_key && is_video) {
//遇到关键帧flush掉前面的数据,确保关键帧为该组数据的第一帧,确保GOP缓存有效
flush_flag = true;
} else {
ret = isFlushAble_merge(_is_audio, _last_stamp, new_stamp, cache_size, mergeWriteMS);
GET_CONFIG(int, mergeWriteMS, General::kMergeWriteMS);
if (mergeWriteMS <= 0) {
//关闭了合并写或者合并写阈值小于等于0
flush_flag = isFlushAble_default(is_video, _last_stamp[is_video], new_stamp, cache_size);
} else {
flush_flag = isFlushAble_merge(is_video, _last_stamp[is_video], new_stamp, cache_size, mergeWriteMS);
}
}
if (ret) {
// DebugL << _is_audio << " " << _last_stamp << " " << new_stamp;
_last_stamp = new_stamp;
if (flush_flag) {
_last_stamp[is_video] = new_stamp;
}
return ret;
return flush_flag;
}
} /* namespace mediakit */
\ No newline at end of file
......@@ -134,7 +134,7 @@ public:
virtual bool isRecording(Recorder::type type);
// 同步查找流
static Ptr find(const string &schema, const string &vhost, const string &app, const string &id, bool bMake = true) ;
static Ptr find(const string &schema, const string &vhost, const string &app, const string &id);
// 异步查找流
static void findAsync(const MediaInfo &info, const std::shared_ptr<TcpSession> &session, const function<void(const Ptr &src)> &cb);
// 遍历所有流
......@@ -142,9 +142,14 @@ public:
// 从mp4文件生成MediaSource
static MediaSource::Ptr createFromMP4(const string &schema, const string &vhost, const string &app, const string &stream, const string &filePath = "", bool checkApp = true);
protected:
void regist() ;
bool unregist() ;
bool unregist();
private:
static Ptr find_l(const string &schema, const string &vhost, const string &app, const string &id, bool bMake);
static void findAsync_l(const MediaInfo &info, const std::shared_ptr<TcpSession> &session, bool retry, const function<void(const MediaSource::Ptr &src)> &cb);
private:
string _strSchema;
string _strVhost;
......@@ -159,10 +164,7 @@ private:
///缓存刷新策略类
class FlushPolicy {
public:
FlushPolicy(bool is_audio) {
_is_audio = is_audio;
};
FlushPolicy() = default;
~FlushPolicy() = default;
uint32_t getStamp(const RtpPacket::Ptr &packet) {
......@@ -173,45 +175,45 @@ public:
return packet->timeStamp;
}
bool isFlushAble(uint32_t new_stamp, int cache_size);
bool isFlushAble(bool is_video, bool is_key, uint32_t new_stamp, int cache_size);
private:
bool _is_audio;
uint32_t _last_stamp= 0;
uint32_t _last_stamp[2] = {0, 0};
};
/// 视频合并写缓存模板
/// 合并写缓存模板
/// \tparam packet 包类型
/// \tparam policy 刷新缓存策略
/// \tparam packet_list 包缓存类型
template<typename packet, typename policy = FlushPolicy, typename packet_list = List<std::shared_ptr<packet> > >
class VideoPacketCache {
class PacketCache {
public:
VideoPacketCache() : _policy(false) {
PacketCache(){
_cache = std::make_shared<packet_list>();
}
virtual ~VideoPacketCache() = default;
virtual ~PacketCache() = default;
void inputVideo(const std::shared_ptr<packet> &rtp, bool key_pos) {
if (_policy.isFlushAble(_policy.getStamp(rtp), _cache->size())) {
void inputPacket(bool is_video, const std::shared_ptr<packet> &pkt, bool key_pos) {
if (_policy.isFlushAble(is_video, key_pos, _policy.getStamp(pkt), _cache->size())) {
flushAll();
}
//追加数据到最后
_cache->emplace_back(rtp);
_cache->emplace_back(pkt);
if (key_pos) {
_key_pos = key_pos;
}
}
virtual void onFlushVideo(std::shared_ptr<packet_list> &, bool key_pos) = 0;
virtual void onFlush(std::shared_ptr<packet_list> &, bool key_pos) = 0;
private:
void flushAll() {
if (_cache->empty()) {
return;
}
onFlushVideo(_cache, _key_pos);
onFlush(_cache, _key_pos);
_cache = std::make_shared<packet_list>();
_key_pos = false;
}
......@@ -222,44 +224,5 @@ private:
bool _key_pos = false;
};
/// 音频频合并写缓存模板
/// \tparam packet 包类型
/// \tparam policy 刷新缓存策略
/// \tparam packet_list 包缓存类型
template<typename packet, typename policy = FlushPolicy, typename packet_list = List<std::shared_ptr<packet> > >
class AudioPacketCache {
public:
AudioPacketCache() : _policy(true) {
_cache = std::make_shared<packet_list>();
}
virtual ~AudioPacketCache() = default;
void inputAudio(const std::shared_ptr<packet> &rtp) {
if (_policy.isFlushAble(_policy.getStamp(rtp), _cache->size())) {
flushAll();
}
//追加数据到最后
_cache->emplace_back(rtp);
}
virtual void onFlushAudio(std::shared_ptr<packet_list> &) = 0;
private:
void flushAll() {
if (_cache->empty()) {
return;
}
onFlushAudio(_cache);
_cache = std::make_shared<packet_list>();
}
private:
policy _policy;
std::shared_ptr<packet_list> _cache;
};
} /* namespace mediakit */
#endif //ZLMEDIAKIT_MEDIASOURCE_H
#endif //ZLMEDIAKIT_MEDIASOURCE_H
\ No newline at end of file
......@@ -298,8 +298,69 @@ void MultiMediaSourceMuxer::resetTracks() {
_muxer->resetTracks();
}
//该类实现frame级别的时间戳覆盖
class FrameModifyStamp : public Frame{
public:
typedef std::shared_ptr<FrameModifyStamp> Ptr;
FrameModifyStamp(const Frame::Ptr &frame, Stamp &stamp){
_frame = frame;
//覆盖时间戳
stamp.revise(frame->dts(), frame->pts(), _dts, _pts, true);
}
~FrameModifyStamp() override {}
uint32_t dts() const override{
return _dts;
}
uint32_t pts() const override{
return _pts;
}
uint32_t prefixSize() const override {
return _frame->prefixSize();
}
bool keyFrame() const override {
return _frame->keyFrame();
}
bool configFrame() const override {
return _frame->configFrame();
}
bool cacheAble() const override {
return _frame->cacheAble();
}
char *data() const override {
return _frame->data();
}
uint32_t size() const override {
return _frame->size();
}
CodecId getCodecId() const override {
return _frame->getCodecId();
}
private:
Frame::Ptr _frame;
int64_t _dts;
int64_t _pts;
};
void MultiMediaSourceMuxer::inputFrame(const Frame::Ptr &frame) {
_muxer->inputFrame(frame);
GET_CONFIG(bool,modify_stamp,General::kModifyStamp);
if(!modify_stamp){
//未开启时间戳覆盖
_muxer->inputFrame(frame);
}else{
//开启了时间戳覆盖
FrameModifyStamp::Ptr new_frame = std::make_shared<FrameModifyStamp>(frame,_stamp[frame->getTrackType()]);
//输入时间戳覆盖后的帧
_muxer->inputFrame(new_frame);
}
}
bool MultiMediaSourceMuxer::isEnabled(){
......
......@@ -178,6 +178,7 @@ public:
private:
MultiMuxerPrivate::Ptr _muxer;
std::weak_ptr<MediaSourceEvent> _listener;
Stamp _stamp[2];
};
}//namespace mediakit
......
......@@ -67,6 +67,7 @@ const string kPublishToRtxp = GENERAL_FIELD"publishToRtxp";
const string kPublishToHls = GENERAL_FIELD"publishToHls";
const string kPublishToMP4 = GENERAL_FIELD"publishToMP4";
const string kMergeWriteMS = GENERAL_FIELD"mergeWriteMS";
const string kModifyStamp = GENERAL_FIELD"modifyStamp";
onceToken token([](){
mINI::Instance()[kFlowThreshold] = 1024;
......@@ -79,6 +80,7 @@ onceToken token([](){
mINI::Instance()[kPublishToHls] = 1;
mINI::Instance()[kPublishToMP4] = 0;
mINI::Instance()[kMergeWriteMS] = 0;
mINI::Instance()[kModifyStamp] = 0;
},nullptr);
}//namespace General
......@@ -293,3 +295,10 @@ const string kBenchmarkMode = "benchmark_mode";
} // namespace mediakit
void Assert_Throw(int failed, const char *exp, const char *func, const char *file, int line){
if(failed) {
_StrPrinter printer;
printer << "Assertion failed: (" << exp << "), function " << func << ", file " << file << ", line " << line << ".";
throw std::runtime_error(printer);
}
}
......@@ -174,6 +174,8 @@ extern const string kPublishToMP4 ;
//合并写缓存大小(单位毫秒),合并写指服务器缓存一定的数据后才会一次性写入socket,这样能提高性能,但是会提高延时
//开启后会同时关闭TCP_NODELAY并开启MSG_MORE
extern const string kMergeWriteMS ;
//全局的时间戳覆盖开关,在转协议时,对frame进行时间戳覆盖
extern const string kModifyStamp;
}//namespace General
......@@ -217,6 +219,7 @@ extern const string kDirectProxy;
////////////RTMP服务器配置///////////
namespace Rtmp {
//rtmp推流时间戳覆盖开关
extern const string kModifyStamp;
//握手超时时间,默认15秒
extern const string kHandshakeSecond;
......
......@@ -9,65 +9,63 @@
*/
#include "AAC.h"
#ifdef ENABLE_MP4
#include "mpeg4-aac.h"
#endif
namespace mediakit{
void writeAdtsHeader(const AACFrame &hed, uint8_t *pcAdts) {
pcAdts[0] = (hed.syncword >> 4 & 0xFF); //8bit
pcAdts[1] = (hed.syncword << 4 & 0xF0); //4 bit
pcAdts[1] |= (hed.id << 3 & 0x08); //1 bit
pcAdts[1] |= (hed.layer << 1 & 0x06); //2bit
pcAdts[1] |= (hed.protection_absent & 0x01); //1 bit
pcAdts[2] = (hed.profile << 6 & 0xC0); // 2 bit
pcAdts[2] |= (hed.sf_index << 2 & 0x3C); //4bit
pcAdts[2] |= (hed.private_bit << 1 & 0x02); //1 bit
pcAdts[2] |= (hed.channel_configuration >> 2 & 0x03); //1 bit
pcAdts[3] = (hed.channel_configuration << 6 & 0xC0); // 2 bit
pcAdts[3] |= (hed.original << 5 & 0x20); //1 bit
pcAdts[3] |= (hed.home << 4 & 0x10); //1 bit
pcAdts[3] |= (hed.copyright_identification_bit << 3 & 0x08); //1 bit
pcAdts[3] |= (hed.copyright_identification_start << 2 & 0x04); //1 bit
pcAdts[3] |= (hed.aac_frame_length >> 11 & 0x03); //2 bit
pcAdts[4] = (hed.aac_frame_length >> 3 & 0xFF); //8 bit
pcAdts[5] = (hed.aac_frame_length << 5 & 0xE0); //3 bit
pcAdts[5] |= (hed.adts_buffer_fullness >> 6 & 0x1F); //5 bit
pcAdts[6] = (hed.adts_buffer_fullness << 2 & 0xFC); //6 bit
pcAdts[6] |= (hed.no_raw_data_blocks_in_frame & 0x03); //2 bit
unsigned const samplingFrequencyTable[16] = { 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350, 0, 0, 0 };
class AdtsHeader{
public:
unsigned int syncword = 0; //12 bslbf 同步字The bit string ‘1111 1111 1111’,说明一个ADTS帧的开始
unsigned int id; //1 bslbf MPEG 标示符, 设置为1
unsigned int layer; //2 uimsbf Indicates which layer is used. Set to ‘00’
unsigned int protection_absent; //1 bslbf 表示是否误码校验
unsigned int profile; //2 uimsbf 表示使用哪个级别的AAC,如01 Low Complexity(LC)--- AACLC
unsigned int sf_index; //4 uimsbf 表示使用的采样率下标
unsigned int private_bit; //1 bslbf
unsigned int channel_configuration; //3 uimsbf 表示声道数
unsigned int original; //1 bslbf
unsigned int home; //1 bslbf
//下面的为改变的参数即每一帧都不同
unsigned int copyright_identification_bit; //1 bslbf
unsigned int copyright_identification_start; //1 bslbf
unsigned int aac_frame_length; // 13 bslbf 一个ADTS帧的长度包括ADTS头和raw data block
unsigned int adts_buffer_fullness; //11 bslbf 0x7FF 说明是码率可变的码流
//no_raw_data_blocks_in_frame 表示ADTS帧中有number_of_raw_data_blocks_in_frame + 1个AAC原始帧.
//所以说number_of_raw_data_blocks_in_frame == 0
//表示说ADTS帧中有一个AAC数据块并不是说没有。(一个AAC原始帧包含一段时间内1024个采样及相关数据)
unsigned int no_raw_data_blocks_in_frame; //2 uimsfb
};
static void dumpAdtsHeader(const AdtsHeader &hed, uint8_t *out) {
out[0] = (hed.syncword >> 4 & 0xFF); //8bit
out[1] = (hed.syncword << 4 & 0xF0); //4 bit
out[1] |= (hed.id << 3 & 0x08); //1 bit
out[1] |= (hed.layer << 1 & 0x06); //2bit
out[1] |= (hed.protection_absent & 0x01); //1 bit
out[2] = (hed.profile << 6 & 0xC0); // 2 bit
out[2] |= (hed.sf_index << 2 & 0x3C); //4bit
out[2] |= (hed.private_bit << 1 & 0x02); //1 bit
out[2] |= (hed.channel_configuration >> 2 & 0x03); //1 bit
out[3] = (hed.channel_configuration << 6 & 0xC0); // 2 bit
out[3] |= (hed.original << 5 & 0x20); //1 bit
out[3] |= (hed.home << 4 & 0x10); //1 bit
out[3] |= (hed.copyright_identification_bit << 3 & 0x08); //1 bit
out[3] |= (hed.copyright_identification_start << 2 & 0x04); //1 bit
out[3] |= (hed.aac_frame_length >> 11 & 0x03); //2 bit
out[4] = (hed.aac_frame_length >> 3 & 0xFF); //8 bit
out[5] = (hed.aac_frame_length << 5 & 0xE0); //3 bit
out[5] |= (hed.adts_buffer_fullness >> 6 & 0x1F); //5 bit
out[6] = (hed.adts_buffer_fullness << 2 & 0xFC); //6 bit
out[6] |= (hed.no_raw_data_blocks_in_frame & 0x03); //2 bit
}
string makeAdtsConfig(const uint8_t *pcAdts){
if (!(pcAdts[0] == 0xFF && (pcAdts[1] & 0xF0) == 0xF0)) {
return "";
}
// Get and check the 'profile':
unsigned char profile = (pcAdts[2] & 0xC0) >> 6; // 2 bits
if (profile == 3) {
return "";
}
// Get and check the 'sampling_frequency_index':
unsigned char sampling_frequency_index = (pcAdts[2] & 0x3C) >> 2; // 4 bits
if (samplingFrequencyTable[sampling_frequency_index] == 0) {
return "";
}
// Get and check the 'channel_configuration':
unsigned char channel_configuration = ((pcAdts[2] & 0x01) << 2)
| ((pcAdts[3] & 0xC0) >> 6); // 3 bits
unsigned char audioSpecificConfig[2];
unsigned char const audioObjectType = profile + 1;
audioSpecificConfig[0] = (audioObjectType << 3) | (sampling_frequency_index >> 1);
audioSpecificConfig[1] = (sampling_frequency_index << 7) | (channel_configuration << 3);
return string((char *)audioSpecificConfig,2);
}
void makeAdtsHeader(const string &strAudioCfg,AACFrame &adts) {
uint8_t cfg1 = strAudioCfg[0];
uint8_t cfg2 = strAudioCfg[1];
static void parseAacConfig(const string &config, AdtsHeader &adts) {
uint8_t cfg1 = config[0];
uint8_t cfg2 = config[1];
int audioObjectType;
int sampling_frequency_index;
......@@ -93,9 +91,83 @@ void makeAdtsHeader(const string &strAudioCfg,AACFrame &adts) {
adts.adts_buffer_fullness = 2047;
adts.no_raw_data_blocks_in_frame = 0;
}
void getAACInfo(const AACFrame &adts,int &iSampleRate,int &iChannel){
iSampleRate = samplingFrequencyTable[adts.sf_index];
iChannel = adts.channel_configuration;
string makeAacConfig(const uint8_t *hex, int length){
#ifndef ENABLE_MP4
if (!(hex[0] == 0xFF && (hex[1] & 0xF0) == 0xF0)) {
return "";
}
// Get and check the 'profile':
unsigned char profile = (hex[2] & 0xC0) >> 6; // 2 bits
if (profile == 3) {
return "";
}
// Get and check the 'sampling_frequency_index':
unsigned char sampling_frequency_index = (hex[2] & 0x3C) >> 2; // 4 bits
if (samplingFrequencyTable[sampling_frequency_index] == 0) {
return "";
}
// Get and check the 'channel_configuration':
unsigned char channel_configuration = ((hex[2] & 0x01) << 2) | ((hex[3] & 0xC0) >> 6); // 3 bits
unsigned char audioSpecificConfig[2];
unsigned char const audioObjectType = profile + 1;
audioSpecificConfig[0] = (audioObjectType << 3) | (sampling_frequency_index >> 1);
audioSpecificConfig[1] = (sampling_frequency_index << 7) | (channel_configuration << 3);
return string((char *)audioSpecificConfig,2);
#else
struct mpeg4_aac_t aac = {0};
if (mpeg4_aac_adts_load(hex, length, &aac) > 0) {
char buf[32] = {0};
int len = mpeg4_aac_audio_specific_config_save(&aac, (uint8_t *) buf, sizeof(buf));
if (len > 0) {
return string(buf, len);
}
}
WarnL << "生成aac config失败, adts header:" << hexdump(hex, length);
return "";
#endif
}
int dumpAacConfig(const string &config, int length, uint8_t *out, int out_size) {
#ifndef ENABLE_MP4
AdtsHeader header;
parseAacConfig(config, header);
header.aac_frame_length = length;
dumpAdtsHeader(header, out);
return ADTS_HEADER_LEN;
#else
struct mpeg4_aac_t aac = {0};
int ret = mpeg4_aac_audio_specific_config_load((uint8_t *) config.data(), config.size(), &aac);
if (ret > 0) {
ret = mpeg4_aac_adts_save(&aac, length, out, out_size);
}
if (ret < 0) {
WarnL << "生成adts头失败:" << ret << ", aac config:" << hexdump(config.data(), config.size());
}
return ret;
#endif
}
bool parseAacConfig(const string &config, int &samplerate, int &channels){
#ifndef ENABLE_MP4
AdtsHeader header;
parseAacConfig(config, header);
samplerate = samplingFrequencyTable[header.sf_index];
channels = header.channel_configuration;
return true;
#else
struct mpeg4_aac_t aac = {0};
int ret = mpeg4_aac_audio_specific_config_load((uint8_t *) config.data(), config.size(), &aac);
if (ret > 0) {
samplerate = aac.sampling_frequency;
channels = aac.channels;
return true;
}
WarnL << "获取aac采样率、声道数失败:" << hexdump(config.data(), config.size());
return false;
#endif
}
Sdp::Ptr AACTrack::getSdp() {
......@@ -103,9 +175,7 @@ Sdp::Ptr AACTrack::getSdp() {
WarnL << getCodecName() << " Track未准备好";
return nullptr;
}
return std::make_shared<AACSdp>(getAacCfg(),getAudioSampleRate());
return std::make_shared<AACSdp>(getAacCfg(),getAudioSampleRate(), getAudioChannel());
}
}//namespace mediakit
}//namespace mediakit
\ No newline at end of file
......@@ -13,18 +13,6 @@
namespace mediakit{
AACRtmpDecoder::AACRtmpDecoder() {
_adts = obtainFrame();
}
AACFrame::Ptr AACRtmpDecoder::obtainFrame() {
//从缓存池重新申请对象,防止覆盖已经写入环形缓存的对象
auto frame = ResourcePoolHelper<AACFrame>::obtainObj();
frame->aac_frame_length = 7;
frame->iPrefixSize = 7;
return frame;
}
static string getAacCfg(const RtmpPacket &thiz) {
string ret;
if (thiz.getMediaType() != FLV_CODEC_AAC) {
......@@ -37,11 +25,11 @@ static string getAacCfg(const RtmpPacket &thiz) {
WarnL << "bad aac cfg!";
return ret;
}
ret = thiz.strBuf.substr(2, 2);
ret = thiz.strBuf.substr(2);
return ret;
}
bool AACRtmpDecoder::inputRtmp(const RtmpPacket::Ptr &pkt, bool key_pos) {
bool AACRtmpDecoder::inputRtmp(const RtmpPacket::Ptr &pkt, bool) {
if (pkt->isCfgFrame()) {
_aac_cfg = getAacCfg(*pkt);
return false;
......@@ -52,26 +40,28 @@ bool AACRtmpDecoder::inputRtmp(const RtmpPacket::Ptr &pkt, bool key_pos) {
return false;
}
void AACRtmpDecoder::onGetAAC(const char* pcData, int iLen, uint32_t ui32TimeStamp) {
if(iLen + 7 > sizeof(_adts->buffer)){
WarnL << "Illegal adts data, exceeding the length limit.";
return;
}
//写adts结构头
makeAdtsHeader(_aac_cfg,*_adts);
void AACRtmpDecoder::onGetAAC(const char* data, int len, uint32_t stamp) {
auto frame = ResourcePoolHelper<AACFrame>::obtainObj();
//拷贝aac负载
memcpy(_adts->buffer + 7, pcData, iLen);
_adts->aac_frame_length = 7 + iLen;
_adts->timeStamp = ui32TimeStamp;
//生成adts头
char adts_header[32] = {0};
auto size = dumpAacConfig(_aac_cfg, len, (uint8_t *) adts_header, sizeof(adts_header));
if (size > 0) {
frame->_buffer.assign(adts_header, size);
frame->_prefix_size = size;
} else {
frame->_buffer.clear();
frame->_prefix_size = 0;
}
//adts结构头转成头7个字节
writeAdtsHeader(*_adts, _adts->buffer);
//追加负载数据
frame->_buffer.append(data, len);
frame->_dts = stamp;
//写入环形缓存
RtmpCodec::inputFrame(_adts);
_adts = obtainFrame();
RtmpCodec::inputFrame(frame);
}
/////////////////////////////////////////////////////////////////////////////////////
AACRtmpEncoder::AACRtmpEncoder(const Track::Ptr &track) {
......@@ -91,9 +81,9 @@ void AACRtmpEncoder::makeConfigPacket() {
void AACRtmpEncoder::inputFrame(const Frame::Ptr &frame) {
if (_aac_cfg.empty()) {
if (frame->prefixSize() >= 7) {
if (frame->prefixSize()) {
//包含adts头,从adts头获取aac配置信息
_aac_cfg = makeAdtsConfig((uint8_t *)(frame->data()));
_aac_cfg = makeAacConfig((uint8_t *) (frame->data()), frame->prefixSize());
}
makeConfigPacket();
}
......
......@@ -23,7 +23,7 @@ class AACRtmpDecoder : public RtmpCodec , public ResourcePoolHelper<AACFrame> {
public:
typedef std::shared_ptr<AACRtmpDecoder> Ptr;
AACRtmpDecoder();
AACRtmpDecoder() {}
~AACRtmpDecoder() {}
/**
......@@ -33,19 +33,14 @@ public:
*/
bool inputRtmp(const RtmpPacket::Ptr &Rtmp, bool key_pos = false) override;
TrackType getTrackType() const override{
return TrackAudio;
}
CodecId getCodecId() const override{
return CodecAAC;
}
protected:
void onGetAAC(const char* pcData, int iLen, uint32_t ui32TimeStamp);
AACFrame::Ptr obtainFrame();
protected:
AACFrame::Ptr _adts;
private:
void onGetAAC(const char *data, int len, uint32_t stamp);
private:
string _aac_cfg;
};
......@@ -76,11 +71,14 @@ public:
* 生成config包
*/
void makeConfigPacket() override;
private:
void makeAudioConfigPkt();
private:
uint8_t _audio_flv_flags;
AACTrack::Ptr _track;
string _aac_cfg;
};
}//namespace mediakit
......
......@@ -9,19 +9,19 @@
*/
#include "AACRtp.h"
#define ADTS_HEADER_LEN 7
#define AAC_MAX_FRAME_SIZE (2 * 1024)
namespace mediakit{
AACRtpEncoder::AACRtpEncoder(uint32_t ui32Ssrc,
uint32_t ui32MtuSize,
uint32_t ui32SampleRate,
uint8_t ui8PlayloadType,
uint8_t ui8PayloadType,
uint8_t ui8Interleaved) :
RtpInfo(ui32Ssrc,
ui32MtuSize,
ui32SampleRate,
ui8PlayloadType,
ui8PayloadType,
ui8Interleaved){
}
......@@ -56,32 +56,30 @@ void AACRtpEncoder::inputFrame(const Frame::Ptr &frame) {
}
void AACRtpEncoder::makeAACRtp(const void *data, unsigned int len, bool mark, uint32_t uiStamp) {
RtpCodec::inputRtp(makeRtp(getTrackType(),data,len,mark,uiStamp), false);
RtpCodec::inputRtp(makeRtp(getTrackType(), data, len, mark, uiStamp), false);
}
/////////////////////////////////////////////////////////////////////////////////////
AACRtpDecoder::AACRtpDecoder(const Track::Ptr &track){
AACRtpDecoder::AACRtpDecoder(const Track::Ptr &track) {
auto aacTrack = dynamic_pointer_cast<AACTrack>(track);
if(!aacTrack || !aacTrack->ready()){
if (!aacTrack || !aacTrack->ready()) {
WarnL << "该aac track无效!";
}else{
} else {
_aac_cfg = aacTrack->getAacCfg();
}
_adts = obtainFrame();
_frame = obtainFrame();
}
AACRtpDecoder::AACRtpDecoder() {
_adts = obtainFrame();
_frame = obtainFrame();
}
AACFrame::Ptr AACRtpDecoder::obtainFrame() {
//从缓存池重新申请对象,防止覆盖已经写入环形缓存的对象
auto frame = ResourcePoolHelper<AACFrame>::obtainObj();
frame->aac_frame_length = ADTS_HEADER_LEN;
frame->iPrefixSize = ADTS_HEADER_LEN;
if(frame->syncword == 0 && !_aac_cfg.empty()) {
makeAdtsHeader(_aac_cfg,*frame);
}
frame->_prefix_size = 0;
frame->_buffer.clear();
return frame;
}
......@@ -96,20 +94,18 @@ bool AACRtpDecoder::inputRtp(const RtpPacket::Ptr &rtppack, bool key_pos) {
//忽略Au-Header区
ptr += 2 + au_header_count * 2;
static const uint32_t max_size = sizeof(AACFrame::buffer) - ADTS_HEADER_LEN;
while (ptr < end) {
auto size = (uint32_t) (end - ptr);
if(size > max_size){
size = max_size;
if (size > AAC_MAX_FRAME_SIZE) {
size = AAC_MAX_FRAME_SIZE;
}
if (_adts->aac_frame_length + size > sizeof(AACFrame::buffer)) {
if (_frame->size() + size > AAC_MAX_FRAME_SIZE) {
//数据太多了,先清空
flushData();
}
//追加aac数据
memcpy(_adts->buffer + _adts->aac_frame_length, ptr, size);
_adts->aac_frame_length += size;
_adts->timeStamp = rtppack->timeStamp;
_frame->_buffer.append((char *) ptr, size);
_frame->_dts = rtppack->timeStamp;
ptr += size;
}
......@@ -120,15 +116,22 @@ bool AACRtpDecoder::inputRtp(const RtpPacket::Ptr &rtppack, bool key_pos) {
return false;
}
void AACRtpDecoder::flushData() {
if(_adts->aac_frame_length == ADTS_HEADER_LEN){
if (_frame->_buffer.empty()) {
//没有有效数据
return;
}
writeAdtsHeader(*_adts, _adts->buffer);
RtpCodec::inputFrame(_adts);
_adts = obtainFrame();
//插入adts头
char adts_header[32] = {0};
auto size = dumpAacConfig(_aac_cfg, _frame->_buffer.size(), (uint8_t *) adts_header, sizeof(adts_header));
if (size > 0) {
//插入adts头
_frame->_buffer.insert(0, adts_header, size);
_frame->_prefix_size = size;
}
RtpCodec::inputFrame(_frame);
_frame = obtainFrame();
}
......
......@@ -31,19 +31,19 @@ public:
*/
bool inputRtp(const RtpPacket::Ptr &rtp, bool key_pos = false) override;
TrackType getTrackType() const override{
return TrackAudio;
}
CodecId getCodecId() const override{
CodecId getCodecId() const override {
return CodecAAC;
}
protected:
AACRtpDecoder();
private:
AACFrame::Ptr obtainFrame();
void flushData();
private:
AACFrame::Ptr _adts;
AACFrame::Ptr _frame;
string _aac_cfg;
};
......@@ -59,13 +59,13 @@ public:
* @param ui32Ssrc ssrc
* @param ui32MtuSize mtu 大小
* @param ui32SampleRate 采样率
* @param ui8PlayloadType pt类型
* @param ui8PayloadType pt类型
* @param ui8Interleaved rtsp interleaved 值
*/
AACRtpEncoder(uint32_t ui32Ssrc,
uint32_t ui32MtuSize,
uint32_t ui32SampleRate,
uint8_t ui8PlayloadType = 97,
uint8_t ui8PayloadType = 97,
uint8_t ui8Interleaved = TrackAudio * 2);
~AACRtpEncoder() {}
......@@ -74,8 +74,10 @@ public:
* @param frame 带dats头的aac数据
*/
void inputFrame(const Frame::Ptr &frame) override;
private:
void makeAACRtp(const void *pData, unsigned int uiLen, bool bMark, uint32_t uiStamp);
private:
unsigned char _aucSectionBuf[1600];
};
......
......@@ -33,17 +33,12 @@ Track::Ptr Factory::getTrackBySdp(const SdpTrack::Ptr &track) {
return nullptr;
}
string aac_cfg;
unsigned int cfg1;
sscanf(aac_cfg_str.substr(0, 2).data(), "%02X", &cfg1);
cfg1 &= 0x00FF;
aac_cfg.push_back(cfg1);
unsigned int cfg2;
sscanf(aac_cfg_str.substr(2, 2).data(), "%02X", &cfg2);
cfg2 &= 0x00FF;
aac_cfg.push_back(cfg2);
for(int i = 0 ; i < aac_cfg_str.size() / 2 ; ++i ){
unsigned int cfg;
sscanf(aac_cfg_str.substr(i * 2, 2).data(), "%02X", &cfg);
cfg &= 0x00FF;
aac_cfg.push_back((char)cfg);
}
return std::make_shared<AACTrack>(aac_cfg);
}
......@@ -115,7 +110,7 @@ RtpCodec::Ptr Factory::getRtpEncoderBySdp(const Sdp::Ptr &sdp) {
}
auto mtu = (sdp->getTrackType() == TrackVideo ? video_mtu : audio_mtu);
auto sample_rate = sdp->getSampleRate();
auto pt = sdp->getPlayloadType();
auto pt = sdp->getPayloadType();
auto interleaved = sdp->getTrackType() * 2;
auto codec_id = sdp->getCodecId();
switch (codec_id){
......@@ -221,13 +216,27 @@ Track::Ptr Factory::getAudioTrackByAmf(const AMFValue& amf, int sample_rate, int
return getTrackByCodecId(codecId, sample_rate, channels, sample_bit);
}
RtmpCodec::Ptr Factory::getRtmpCodecByTrack(const Track::Ptr &track) {
RtmpCodec::Ptr Factory::getRtmpCodecByTrack(const Track::Ptr &track, bool is_encode) {
switch (track->getCodecId()){
case CodecH264 : return std::make_shared<H264RtmpEncoder>(track);
case CodecAAC : return std::make_shared<AACRtmpEncoder>(track);
case CodecH265 : return std::make_shared<H265RtmpEncoder>(track);
case CodecG711A :
case CodecG711U : return std::make_shared<G711RtmpEncoder>(track);
case CodecG711A :
case CodecG711U : {
auto audio_track = dynamic_pointer_cast<AudioTrack>(track);
if (is_encode && (audio_track->getAudioSampleRate() != 8000 ||
audio_track->getAudioChannel() != 1 ||
audio_track->getAudioSampleBit() != 16)) {
//rtmp对g711只支持8000/1/16规格,但是ZLMediaKit可以解析其他规格的G711
WarnL << "RTMP只支持8000/1/16规格的G711,目前规格是:"
<< audio_track->getAudioSampleRate() << "/"
<< audio_track->getAudioChannel() << "/"
<< audio_track->getAudioSampleBit()
<< ",该音频已被忽略";
return nullptr;
}
return std::make_shared<G711RtmpEncoder>(track);
}
default : WarnL << "暂不支持该CodecId:" << track->getCodecName(); return nullptr;
}
}
......
......@@ -59,8 +59,9 @@ public:
/**
* 根据Track获取Rtmp的编解码器
* @param track 媒体描述对象
* @param is_encode 是否为编码器还是解码器
*/
static RtmpCodec::Ptr getRtmpCodecByTrack(const Track::Ptr &track);
static RtmpCodec::Ptr getRtmpCodecByTrack(const Track::Ptr &track, bool is_encode);
/**
* 根据codecId获取rtmp的codec描述
......
......@@ -15,6 +15,59 @@ using namespace toolkit;
namespace mediakit{
/**
* 该对象的功能是把一个不可缓存的帧转换成可缓存的帧
*/
class FrameCacheAble : public FrameFromPtr {
public:
typedef std::shared_ptr<FrameCacheAble> Ptr;
FrameCacheAble(const Frame::Ptr &frame){
if(frame->cacheAble()){
_frame = frame;
_ptr = frame->data();
}else{
_buffer = std::make_shared<BufferRaw>();
_buffer->assign(frame->data(),frame->size());
_ptr = _buffer->data();
}
_size = frame->size();
_dts = frame->dts();
_pts = frame->pts();
_prefix_size = frame->prefixSize();
_codecid = frame->getCodecId();
_key = frame->keyFrame();
_config = frame->configFrame();
}
virtual ~FrameCacheAble() = default;
/**
* 可以被缓存
*/
bool cacheAble() const override {
return true;
}
CodecId getCodecId() const override{
return _codecid;
}
bool keyFrame() const override{
return _key;
}
bool configFrame() const override{
return _config;
}
private:
Frame::Ptr _frame;
BufferRaw::Ptr _buffer;
CodecId _codecid;
bool _key;
bool _config;
};
Frame::Ptr Frame::getCacheAbleFrame(const Frame::Ptr &frame){
if(frame->cacheAble()){
return frame;
......@@ -23,17 +76,35 @@ Frame::Ptr Frame::getCacheAbleFrame(const Frame::Ptr &frame){
}
#define SWITCH_CASE(codec_id) case codec_id : return #codec_id
const char *CodecInfo::getCodecName() {
switch (getCodecId()) {
const char *getCodecName(CodecId codecId) {
switch (codecId) {
SWITCH_CASE(CodecH264);
SWITCH_CASE(CodecH265);
SWITCH_CASE(CodecAAC);
SWITCH_CASE(CodecG711A);
SWITCH_CASE(CodecG711U);
default:
return "unknown codec";
SWITCH_CASE(CodecOpus);
default : return "unknown codec";
}
}
}//namespace mediakit
TrackType getTrackType(CodecId codecId){
switch (codecId){
case CodecH264:
case CodecH265: return TrackVideo;
case CodecAAC:
case CodecG711A:
case CodecG711U:
case CodecOpus: return TrackAudio;
default: return TrackInvalid;
}
}
const char *CodecInfo::getCodecName() {
return mediakit::getCodecName(getCodecId());
}
TrackType CodecInfo::getTrackType() {
return mediakit::getTrackType(getCodecId());
}
}//namespace mediakit
......@@ -28,6 +28,7 @@ typedef enum {
CodecAAC,
CodecG711A,
CodecG711U,
CodecOpus,
CodecMax = 0x7FFF
} CodecId;
......@@ -40,6 +41,16 @@ typedef enum {
} TrackType;
/**
* 获取编码器名称
*/
const char *getCodecName(CodecId codecId);
/**
* 获取音视频类型
*/
TrackType getTrackType(CodecId codecId);
/**
* 编码信息的抽象接口
*/
class CodecInfo {
......@@ -50,20 +61,19 @@ public:
virtual ~CodecInfo(){}
/**
* 获取音视频类型
*/
virtual TrackType getTrackType() const = 0;
/**
* 获取编解码器类型
*/
virtual CodecId getCodecId() const = 0;
/**
* 获取编码器名称
* @return 编码器名称
*/
const char *getCodecName();
/**
* 获取音视频类型
*/
TrackType getTrackType();
};
/**
......@@ -76,15 +86,11 @@ public:
/**
* 返回解码时间戳,单位毫秒
* @return
*/
virtual uint32_t dts() const = 0;
/**
* 返回显示时间戳,单位毫秒
* @return
*/
virtual uint32_t pts() const {
return dts();
......@@ -98,13 +104,11 @@ public:
/**
* 返回是否为关键帧
* @return
*/
virtual bool keyFrame() const = 0;
/**
* 是否为配置帧,譬如sps pps vps
* @return
*/
virtual bool configFrame() const = 0;
......@@ -115,14 +119,77 @@ public:
/**
* 返回可缓存的frame
* @return
*/
static Ptr getCacheAbleFrame(const Ptr &frame);
};
class FrameImp : public Frame {
public:
typedef std::shared_ptr<FrameImp> Ptr;
char *data() const override{
return (char *)_buffer.data();
}
uint32_t size() const override {
return _buffer.size();
}
uint32_t dts() const override {
return _dts;
}
uint32_t pts() const override{
return _pts ? _pts : _dts;
}
uint32_t prefixSize() const override{
return _prefix_size;
}
CodecId getCodecId() const override{
return _codecid;
}
bool keyFrame() const override {
return false;
}
bool configFrame() const override{
return false;
}
public:
CodecId _codecid = CodecInvalid;
string _buffer;
uint32_t _dts = 0;
uint32_t _pts = 0;
uint32_t _prefix_size = 0;
};
/**
* 一个Frame类中可以有多个帧,他们通过 0x 00 00 01 分隔
* ZLMediaKit会先把这种复合帧split成单个帧然后再处理
* 一个复合帧可以通过无内存拷贝的方式切割成多个子Frame
* 提供该类的目的是切割复合帧时防止内存拷贝,提高性能
*/
template<typename Parent>
class FrameInternal : public Parent{
public:
typedef std::shared_ptr<FrameInternal> Ptr;
FrameInternal(const Frame::Ptr &parent_frame, char *ptr, uint32_t size, int prefix_size)
: Parent(ptr, size, parent_frame->dts(), parent_frame->pts(), prefix_size) {
_parent_frame = parent_frame;
}
bool cacheAble() const override {
return _parent_frame->cacheAble();
}
private:
Frame::Ptr _parent_frame;
};
/**
* 循环池辅助类
* @tparam T
*/
template <typename T>
class ResourcePoolHelper{
......@@ -140,18 +207,17 @@ private:
};
/**
* 写帧接口的抽闲接口
* 写帧接口的抽象接口类
*/
class FrameWriterInterface {
public:
typedef std::shared_ptr<FrameWriterInterface> Ptr;
FrameWriterInterface(){}
virtual ~FrameWriterInterface(){}
/**
* 写入帧数据
* @param frame 帧
*/
* 写入帧数据
*/
virtual void inputFrame(const Frame::Ptr &frame) = 0;
};
......@@ -165,16 +231,16 @@ public:
/**
* inputFrame后触发onWriteFrame回调
* @param cb
*/
FrameWriterInterfaceHelper(const onWriteFrame& cb){
_writeCallback = cb;
}
virtual ~FrameWriterInterfaceHelper(){}
/**
* 写入帧数据
* @param frame 帧
*/
* 写入帧数据
*/
void inputFrame(const Frame::Ptr &frame) override {
_writeCallback(frame);
}
......@@ -182,7 +248,6 @@ private:
onWriteFrame _writeCallback;
};
/**
* 支持代理转发的帧环形缓存
*/
......@@ -193,6 +258,9 @@ public:
FrameDispatcher(){}
virtual ~FrameDispatcher(){}
/**
* 添加代理
*/
void addDelegate(const FrameWriterInterface::Ptr &delegate){
//_delegates_write可能多线程同时操作
lock_guard<mutex> lck(_mtx);
......@@ -200,7 +268,10 @@ public:
_need_update = true;
}
void delDelegate(void *ptr){
/**
* 删除代理
*/
void delDelegate(FrameWriterInterface *ptr){
//_delegates_write可能多线程同时操作
lock_guard<mutex> lck(_mtx);
_delegates_write.erase(ptr);
......@@ -208,8 +279,7 @@ public:
}
/**
* 写入帧数据
* @param frame 帧
* 写入帧并派发
*/
void inputFrame(const Frame::Ptr &frame) override{
if(_need_update){
......@@ -223,7 +293,13 @@ public:
for(auto &pr : _delegates_read){
pr.second->inputFrame(frame);
}
}
/**
* 返回代理个数
*/
int size() const {
return _delegates_write.size();
}
private:
mutex _mtx;
......@@ -250,105 +326,23 @@ public:
}
uint32_t pts() const override{
if(_pts){
return _pts;
}
return dts();
return _pts ? _pts : dts();
}
uint32_t prefixSize() const override{
return _prefixSize;
return _prefix_size;
}
bool cacheAble() const override {
return false;
}
protected:
char *_ptr;
uint32_t _size;
uint32_t _dts;
uint32_t _pts = 0;
uint32_t _prefixSize;
};
/**
* 不可缓存的帧,在DevChannel类中有用到。
* 该帧类型用于防止内存拷贝,直接使用指针传递数据
* 在大多数情况下,ZLMediaKit是同步对帧数据进行使用和处理的
* 所以提供此类型的帧很有必要,但是有时又无法避免缓存帧做后续处理
* 所以可以通过Frame::getCacheAbleFrame方法拷贝一个可缓存的帧
*/
class FrameNoCacheAble : public FrameFromPtr{
public:
typedef std::shared_ptr<FrameNoCacheAble> Ptr;
/**
* 该帧不可缓存
* @return
*/
bool cacheAble() const override {
return false;
}
uint32_t _prefix_size;
};
/**
* 该对象的功能是把一个不可缓存的帧转换成可缓存的帧
* @see FrameNoCacheAble
*/
class FrameCacheAble : public FrameFromPtr {
public:
typedef std::shared_ptr<FrameCacheAble> Ptr;
FrameCacheAble(const Frame::Ptr &frame){
if(frame->cacheAble()){
_frame = frame;
_ptr = frame->data();
}else{
_buffer = std::make_shared<BufferRaw>();
_buffer->assign(frame->data(),frame->size());
_ptr = _buffer->data();
}
_size = frame->size();
_dts = frame->dts();
_pts = frame->pts();
_prefixSize = frame->prefixSize();
_trackType = frame->getTrackType();
_codec = frame->getCodecId();
_key = frame->keyFrame();
_config = frame->configFrame();
}
virtual ~FrameCacheAble() = default;
/**
* 可以被缓存
* @return
*/
bool cacheAble() const override {
return true;
}
TrackType getTrackType() const override{
return _trackType;
}
CodecId getCodecId() const override{
return _codec;
}
bool keyFrame() const override{
return _key;
}
bool configFrame() const override{
return _config;
}
private:
Frame::Ptr _frame;
BufferRaw::Ptr _buffer;
TrackType _trackType;
CodecId _codec;
bool _key;
bool _config;
};
}//namespace mediakit
#endif //ZLMEDIAKIT_FRAME_H
#endif //ZLMEDIAKIT_FRAME_H
\ No newline at end of file
......@@ -19,76 +19,28 @@ namespace mediakit{
/**
* G711帧
*/
class G711Frame : public Frame {
class G711Frame : public FrameImp {
public:
typedef std::shared_ptr<G711Frame> Ptr;
char *data() const override{
return (char *)buffer.data();
}
uint32_t size() const override {
return buffer.size();
}
uint32_t dts() const override {
return timeStamp;
}
uint32_t prefixSize() const override{
return 0;
}
TrackType getTrackType() const override{
return TrackAudio;
}
CodecId getCodecId() const override{
return _codecId;
}
bool keyFrame() const override {
return false;
}
bool configFrame() const override{
return false;
G711Frame(){
_codecid = CodecG711A;
}
public:
CodecId _codecId = CodecG711A;
string buffer;
uint32_t timeStamp;
} ;
};
class G711FrameNoCacheAble : public FrameNoCacheAble {
class G711FrameNoCacheAble : public FrameFromPtr {
public:
typedef std::shared_ptr<G711FrameNoCacheAble> Ptr;
//兼容通用接口
G711FrameNoCacheAble(char *ptr,uint32_t size,uint32_t dts, uint32_t pts = 0,int prefixeSize = 0){
G711FrameNoCacheAble(char *ptr,uint32_t size,uint32_t dts, uint32_t pts = 0,int prefix_size = 0){
_ptr = ptr;
_size = size;
_dts = dts;
_prefixSize = prefixeSize;
_prefix_size = prefix_size;
}
//兼容通用接口
void setCodec(CodecId codecId){
_codecId = codecId;
}
G711FrameNoCacheAble(CodecId codecId, char *ptr,uint32_t size,uint32_t dts,int prefixeSize = 0){
_codecId = codecId;
_ptr = ptr;
_size = size;
_dts = dts;
_prefixSize = prefixeSize;
}
TrackType getTrackType() const override{
return TrackAudio;
}
CodecId getCodecId() const override{
return _codecId;
}
......@@ -108,67 +60,18 @@ private:
/**
* G711音频通道
*/
class G711Track : public AudioTrack{
class G711Track : public AudioTrackImp{
public:
typedef std::shared_ptr<G711Track> Ptr;
/**
* G711A G711U
*/
G711Track(CodecId codecId,int sample_rate, int channels, int sample_bit){
_codecid = codecId;
_sample_rate = sample_rate;
_channels = channels;
_sample_bit = sample_bit;
}
/**
* 返回编码类型
*/
CodecId getCodecId() const override{
return _codecid;
}
/**
* 是否已经初始化
*/
bool ready() override {
return true;
}
/**
* 返回音频采样率
*/
int getAudioSampleRate() const override{
return _sample_rate;
}
/**
* 返回音频采样位数,一般为16或8
*/
int getAudioSampleBit() const override{
return _sample_bit;
}
/**
* 返回音频通道数
*/
int getAudioChannel() const override{
return _channels;
}
G711Track(CodecId codecId,int sample_rate, int channels, int sample_bit) : AudioTrackImp(codecId,sample_rate,channels,sample_bit){}
private:
//克隆该Track
Track::Ptr clone() override {
return std::make_shared<std::remove_reference<decltype(*this)>::type >(*this);
}
//生成sdp
Sdp::Ptr getSdp() override ;
private:
CodecId _codecid;
int _sample_rate;
int _channels;
int _sample_bit;
};
/**
......@@ -180,37 +83,30 @@ public:
* G711采样率固定为8000
* @param codecId G711A G711U
* @param sample_rate 音频采样率
* @param playload_type rtp playload
* @param payload_type rtp payload
* @param bitrate 比特率
*/
G711Sdp(CodecId codecId,
int sample_rate,
int channels,
int playload_type = 98,
int bitrate = 128) : Sdp(sample_rate,playload_type), _codecId(codecId){
_printer << "m=audio 0 RTP/AVP " << playload_type << "\r\n";
_printer << "a=rtpmap:" << playload_type << (codecId == CodecG711A ? " PCMA/" : " PCMU/") << sample_rate << "/" << channels << "\r\n";
_printer << "a=control:trackID=" << getTrackType() << "\r\n";
int payload_type = 98,
int bitrate = 128) : Sdp(sample_rate,payload_type), _codecId(codecId){
_printer << "m=audio 0 RTP/AVP " << payload_type << "\r\n";
_printer << "a=rtpmap:" << payload_type << (codecId == CodecG711A ? " PCMA/" : " PCMU/") << sample_rate << "/" << channels << "\r\n";
_printer << "a=control:trackID=" << (int)TrackAudio << "\r\n";
}
string getSdp() const override {
return _printer;
}
TrackType getTrackType() const override {
return TrackAudio;
}
CodecId getCodecId() const override {
return _codecId;
}
private:
_StrPrinter _printer;
CodecId _codecId;
};
}//namespace mediakit
#endif //ZLMEDIAKIT_AAC_H
#endif //ZLMEDIAKIT_G711_H
\ No newline at end of file
......@@ -20,15 +20,15 @@ G711RtmpDecoder::G711RtmpDecoder(CodecId codecId) {
G711Frame::Ptr G711RtmpDecoder::obtainFrame() {
//从缓存池重新申请对象,防止覆盖已经写入环形缓存的对象
auto frame = ResourcePoolHelper<G711Frame>::obtainObj();
frame->buffer.clear();
frame->_codecId = _codecId;
frame->_buffer.clear();
frame->_codecid = _codecId;
return frame;
}
bool G711RtmpDecoder::inputRtmp(const RtmpPacket::Ptr &pkt, bool) {
//拷贝G711负载
_frame->buffer.assign(pkt->strBuf.data() + 1, pkt->strBuf.size() - 1);
_frame->timeStamp = pkt->timeStamp;
_frame->_buffer.assign(pkt->strBuf.data() + 1, pkt->strBuf.size() - 1);
_frame->_dts = pkt->timeStamp;
//写入环形缓存
RtmpCodec::inputFrame(_frame);
_frame = obtainFrame();
......
......@@ -33,10 +33,6 @@ public:
*/
bool inputRtmp(const RtmpPacket::Ptr &Rtmp, bool key_pos = false) override;
TrackType getTrackType() const override{
return TrackAudio;
}
CodecId getCodecId() const override{
return _codecId;
}
......
......@@ -20,9 +20,9 @@ G711RtpDecoder::G711RtpDecoder(const Track::Ptr &track){
G711Frame::Ptr G711RtpDecoder::obtainFrame() {
//从缓存池重新申请对象,防止覆盖已经写入环形缓存的对象
auto frame = ResourcePoolHelper<G711Frame>::obtainObj();
frame->buffer.clear();
frame->_codecId = _codecid;
frame->timeStamp = 0;
frame->_buffer.clear();
frame->_codecid = _codecid;
frame->_dts = 0;
return frame;
}
......@@ -32,17 +32,17 @@ bool G711RtpDecoder::inputRtp(const RtpPacket::Ptr &rtppack, bool) {
// 获取rtp数据
const char *rtp_packet_buf = rtppack->data() + rtppack->offset;
if (rtppack->timeStamp != _frame->timeStamp) {
if (rtppack->timeStamp != _frame->_dts) {
//时间戳变更,清空上一帧
onGetG711(_frame);
}
//追加数据
_frame->buffer.append(rtp_packet_buf, length);
_frame->_buffer.append(rtp_packet_buf, length);
//赋值时间戳
_frame->timeStamp = rtppack->timeStamp;
_frame->_dts = rtppack->timeStamp;
if (rtppack->mark || _frame->buffer.size() > 10 * 1024) {
if (rtppack->mark || _frame->_buffer.size() > 10 * 1024) {
//标记为mark时,或者内存快溢出时,我们认为这是该帧最后一个包
onGetG711(_frame);
}
......@@ -50,7 +50,7 @@ bool G711RtpDecoder::inputRtp(const RtpPacket::Ptr &rtppack, bool) {
}
void G711RtpDecoder::onGetG711(const G711Frame::Ptr &frame) {
if(!frame->buffer.empty()){
if(!frame->_buffer.empty()){
//写入环形缓存
RtpCodec::inputFrame(frame);
_frame = obtainFrame();
......@@ -62,12 +62,12 @@ void G711RtpDecoder::onGetG711(const G711Frame::Ptr &frame) {
G711RtpEncoder::G711RtpEncoder(uint32_t ui32Ssrc,
uint32_t ui32MtuSize,
uint32_t ui32SampleRate,
uint8_t ui8PlayloadType,
uint8_t ui8PayloadType,
uint8_t ui8Interleaved) :
RtpInfo(ui32Ssrc,
ui32MtuSize,
ui32SampleRate,
ui8PlayloadType,
ui8PayloadType,
ui8Interleaved) {
}
......@@ -96,6 +96,3 @@ void G711RtpEncoder::makeG711Rtp(const void *data, unsigned int len, bool mark,
}
}//namespace mediakit
......@@ -31,10 +31,6 @@ public:
*/
bool inputRtp(const RtpPacket::Ptr &rtp, bool key_pos = false) override;
TrackType getTrackType() const override{
return TrackAudio;
}
CodecId getCodecId() const override{
return _codecid;
}
......@@ -62,13 +58,13 @@ public:
* @param ui32Ssrc ssrc
* @param ui32MtuSize mtu 大小
* @param ui32SampleRate 采样率
* @param ui8PlayloadType pt类型
* @param ui8PayloadType pt类型
* @param ui8Interleaved rtsp interleaved 值
*/
G711RtpEncoder(uint32_t ui32Ssrc,
uint32_t ui32MtuSize,
uint32_t ui32SampleRate,
uint8_t ui8PlayloadType = 0,
uint8_t ui8PayloadType = 0,
uint8_t ui8Interleaved = TrackAudio * 2);
~G711RtpEncoder() {}
......
......@@ -44,34 +44,77 @@ const char *memfind(const char *buf, int len, const char *subbuf, int sublen) {
return NULL;
}
void splitH264(const char *ptr, int len, const std::function<void(const char *, int)> &cb) {
auto nal = ptr;
void splitH264(const char *ptr, int len, int prefix, const std::function<void(const char *, int, int)> &cb) {
auto start = ptr + prefix;
auto end = ptr + len;
while(true) {
auto next_nal = memfind(nal + 3,end - nal - 3,"\x0\x0\x1",3);
if(next_nal){
if(*(next_nal - 1) == 0x00){
next_nal -= 1;
int next_prefix;
while (true) {
auto next_start = memfind(start, end - start, "\x00\x00\x01", 3);
if (next_start) {
//找到下一帧
if (*(next_start - 1) == 0x00) {
//这个是00 00 00 01开头
next_start -= 1;
next_prefix = 4;
} else {
//这个是00 00 01开头
next_prefix = 3;
}
cb(nal,next_nal - nal);
nal = next_nal;
//记得加上本帧prefix长度
cb(start - prefix, next_start - start + prefix, prefix);
//搜索下一帧末尾的起始位置
start = next_start + next_prefix;
//记录下一帧的prefix长度
prefix = next_prefix;
continue;
}
cb(nal,end - nal);
//未找到下一帧,这是最后一帧
cb(start - prefix, end - start + prefix, prefix);
break;
}
}
/**
 * Return the length of the Annex-B start code at the head of the buffer.
 * @param ptr buffer start
 * @param len buffer length in bytes
 * @return 4 for 00 00 00 01, 3 for 00 00 01, otherwise 0 (also 0 when len < 4)
 */
int prefixSize(const char *ptr, int len){
    //too short to inspect four bytes, or not starting with 00 00
    if (len < 4 || ptr[0] != 0x00 || ptr[1] != 0x00) {
        return 0;
    }
    if (ptr[2] == 0x01) {
        //00 00 01 prefix
        return 3;
    }
    //either the 00 00 00 01 prefix or no start code at all
    return (ptr[2] == 0x00 && ptr[3] == 0x01) ? 4 : 0;
}
#if 0
//splitH264函数测试程序
static onceToken s_token([](){
char buf[] = "\x00\x00\x00\x01\x12\x23\x34\x45\x56"
"\x00\x00\x00\x01\x12\x23\x34\x45\x56"
"\x00\x00\x00\x01\x12\x23\x34\x45\x56"
"\x00\x00\x01\x12\x23\x34\x45\x56";
splitH264(buf, sizeof(buf) - 1, [](const char *ptr, int len){
cout << hexdump(ptr, len) << endl;
});
{
char buf[] = "\x00\x00\x00\x01\x12\x23\x34\x45\x56"
"\x00\x00\x00\x01\x23\x34\x45\x56"
"\x00\x00\x00\x01x34\x45\x56"
"\x00\x00\x01\x12\x23\x34\x45\x56";
splitH264(buf, sizeof(buf) - 1, 4, [](const char *ptr, int len, int prefix) {
cout << prefix << " " << hexdump(ptr, len) << endl;
});
}
{
char buf[] = "\x00\x00\x00\x01\x12\x23\x34\x45\x56";
splitH264(buf, sizeof(buf) - 1, 4, [](const char *ptr, int len, int prefix) {
cout << prefix << " " << hexdump(ptr, len) << endl;
});
}
});
#endif //0
......
......@@ -20,12 +20,12 @@ using namespace toolkit;
namespace mediakit{
bool getAVCInfo(const string &strSps,int &iVideoWidth, int &iVideoHeight, float &iVideoFps);
void splitH264(const char *ptr, int len, const std::function<void(const char *, int)> &cb);
void splitH264(const char *ptr, int len, int prefix, const std::function<void(const char *, int, int)> &cb);
int prefixSize(const char *ptr, int len);
/**
* 264帧类
*/
class H264Frame : public Frame {
class H264Frame : public FrameImp {
public:
typedef std::shared_ptr<H264Frame> Ptr;
......@@ -36,30 +36,8 @@ public:
NAL_SEI = 6,
} NalType;
char *data() const override{
return (char *)_buffer.data();
}
uint32_t size() const override {
return _buffer.size();
}
uint32_t dts() const override {
return _dts;
}
uint32_t pts() const override {
return _pts ? _pts : _dts;
}
uint32_t prefixSize() const override{
return _prefix_size;
}
TrackType getTrackType() const override{
return TrackVideo;
}
CodecId getCodecId() const override{
return CodecH264;
H264Frame(){
_codecid = CodecH264;
}
bool keyFrame() const override {
......@@ -69,39 +47,27 @@ public:
bool configFrame() const override{
switch(H264_TYPE(_buffer[_prefix_size]) ){
case H264Frame::NAL_SPS:
case H264Frame::NAL_PPS:
return true;
default:
return false;
case H264Frame::NAL_PPS:return true;
default:return false;
}
}
public:
uint32_t _dts = 0;
uint32_t _pts = 0;
uint32_t _prefix_size = 4;
string _buffer;
};
/**
* 防止内存拷贝的H264类
* 用户可以通过该类型快速把一个指针无拷贝的包装成Frame类
* 该类型在DevChannel中有使用
*/
class H264FrameNoCacheAble : public FrameNoCacheAble {
class H264FrameNoCacheAble : public FrameFromPtr {
public:
typedef std::shared_ptr<H264FrameNoCacheAble> Ptr;
H264FrameNoCacheAble(char *ptr,uint32_t size,uint32_t dts , uint32_t pts ,int prefixeSize = 4){
H264FrameNoCacheAble(char *ptr,uint32_t size,uint32_t dts , uint32_t pts ,int prefix_size = 4){
_ptr = ptr;
_size = size;
_dts = dts;
_pts = pts;
_prefixSize = prefixeSize;
}
TrackType getTrackType() const override{
return TrackVideo;
_prefix_size = prefix_size;
}
CodecId getCodecId() const override{
......@@ -109,43 +75,18 @@ public:
}
bool keyFrame() const override {
return H264_TYPE(_ptr[_prefixSize]) == H264Frame::NAL_IDR;
return H264_TYPE(_ptr[_prefix_size]) == H264Frame::NAL_IDR;
}
bool configFrame() const override{
switch(H264_TYPE(_ptr[_prefixSize])){
switch(H264_TYPE(_ptr[_prefix_size])){
case H264Frame::NAL_SPS:
case H264Frame::NAL_PPS:
return true;
default:
return false;
case H264Frame::NAL_PPS:return true;
default:return false;
}
}
};
/**
* 一个H264Frame类中可以有多个帧,他们通过 0x 00 00 01 分隔
* ZLMediaKit会先把这种复合帧split成单个帧然后再处理
* 一个复合帧可以通过无内存拷贝的方式切割成多个H264FrameSubFrame
* 提供该类的目的是切换复合帧时防止内存拷贝,提高性能
*/
template<typename Parent>
class FrameInternal : public Parent{
public:
typedef std::shared_ptr<FrameInternal> Ptr;
FrameInternal(const Frame::Ptr &parent_frame,
char *ptr,
uint32_t size,
int prefixeSize) : Parent(ptr,size,parent_frame->dts(),parent_frame->pts(),prefixeSize){
_parent_frame = parent_frame;
}
bool cacheAble() const override {
return _parent_frame->cacheAble();
}
private:
Frame::Ptr _parent_frame;
};
typedef FrameInternal<H264FrameNoCacheAble> H264FrameInternal;
/**
......@@ -243,25 +184,10 @@ public:
int type = H264_TYPE(*((uint8_t *)frame->data() + frame->prefixSize()));
if(type == H264Frame::NAL_SPS || type == H264Frame::NAL_SEI){
//有些设备会把SPS PPS IDR帧当做一个帧打包,所以我们要split一下
bool first_frame = true;
splitH264(frame->data() + frame->prefixSize(),
frame->size() - frame->prefixSize(),
[&](const char *ptr, int len){
if(first_frame){
H264FrameInternal::Ptr sub_frame = std::make_shared<H264FrameInternal>(frame,
frame->data(),
len + frame->prefixSize(),
frame->prefixSize());
inputFrame_l(sub_frame);
first_frame = false;
}else{
H264FrameInternal::Ptr sub_frame = std::make_shared<H264FrameInternal>(frame,
(char *)ptr,
len ,
3);
inputFrame_l(sub_frame);
}
});
splitH264(frame->data(), frame->size(), frame->prefixSize(), [&](const char *ptr, int len, int prefix) {
H264FrameInternal::Ptr sub_frame = std::make_shared<H264FrameInternal>(frame, (char *)ptr, len, prefix);
inputFrame_l(sub_frame);
});
} else{
inputFrame_l(frame);
}
......@@ -302,6 +228,11 @@ private:
}
break;
case H264Frame::NAL_SEI:{
//忽略SEI
break;
}
default:
VideoTrack::inputFrame(frame);
break;
......@@ -349,29 +280,27 @@ private:
bool _last_frame_is_idr = false;
};
/**
* h264类型sdp
*/
class H264Sdp : public Sdp {
public:
/**
*
* @param sps 264 sps,不带0x00000001头
* @param pps 264 pps,不带0x00000001头
* @param playload_type rtp playload type 默认96
* @param payload_type rtp payload type 默认96
* @param bitrate 比特率
*/
H264Sdp(const string &strSPS,
const string &strPPS,
int playload_type = 96,
int bitrate = 4000) : Sdp(90000,playload_type) {
int payload_type = 96,
int bitrate = 4000) : Sdp(90000,payload_type) {
//视频通道
_printer << "m=video 0 RTP/AVP " << playload_type << "\r\n";
_printer << "m=video 0 RTP/AVP " << payload_type << "\r\n";
_printer << "b=AS:" << bitrate << "\r\n";
_printer << "a=rtpmap:" << playload_type << " H264/" << 90000 << "\r\n";
_printer << "a=fmtp:" << playload_type << " packetization-mode=1; profile-level-id=";
_printer << "a=rtpmap:" << payload_type << " H264/" << 90000 << "\r\n";
_printer << "a=fmtp:" << payload_type << " packetization-mode=1; profile-level-id=";
char strTemp[100];
uint32_t profile_level_id = 0;
......@@ -390,17 +319,13 @@ public:
memset(strTemp, 0, 100);
av_base64_encode(strTemp, 100, (uint8_t *) strPPS.data(), strPPS.size());
_printer << strTemp << "\r\n";
_printer << "a=control:trackID=" << getTrackType() << "\r\n";
_printer << "a=control:trackID=" << (int)TrackVideo << "\r\n";
}
string getSdp() const override {
return _printer;
}
TrackType getTrackType() const override {
return TrackVideo;
}
CodecId getCodecId() const override {
return CodecH264;
}
......@@ -408,8 +333,5 @@ private:
_StrPrinter _printer;
};
}//namespace mediakit
#endif //ZLMEDIAKIT_H264_H
#endif //ZLMEDIAKIT_H264_H
\ No newline at end of file
......@@ -36,10 +36,6 @@ public:
*/
bool inputRtmp(const RtmpPacket::Ptr &rtmp, bool key_pos = true) override;
TrackType getTrackType() const override{
return TrackVideo;
}
CodecId getCodecId() const override{
return CodecH264;
}
......
......@@ -157,7 +157,7 @@ bool H264RtpDecoder::decodeRtp(const RtpPacket::Ptr &rtppack) {
if (rtppack->sequence != _lastSeq + 1 && rtppack->sequence != 0) {
//中间的或末尾的rtp包,其seq必须连续(如果回环了则判定为连续),否则说明rtp丢包,那么该帧不完整,必须得丢弃
_h264frame->_buffer.clear();
WarnL << "rtp sequence不连续: " << rtppack->sequence << " != " << _lastSeq << " + 1,该帧被废弃";
WarnL << "rtp丢包: " << rtppack->sequence << " != " << _lastSeq << " + 1,该帧被废弃";
return false;
}
......@@ -204,12 +204,12 @@ void H264RtpDecoder::onGetH264(const H264Frame::Ptr &frame) {
H264RtpEncoder::H264RtpEncoder(uint32_t ui32Ssrc,
uint32_t ui32MtuSize,
uint32_t ui32SampleRate,
uint8_t ui8PlayloadType,
uint8_t ui8PayloadType,
uint8_t ui8Interleaved) :
RtpInfo(ui32Ssrc,
ui32MtuSize,
ui32SampleRate,
ui8PlayloadType,
ui8PayloadType,
ui8Interleaved) {
}
......
......@@ -38,10 +38,6 @@ public:
*/
bool inputRtp(const RtpPacket::Ptr &rtp, bool key_pos = true) override;
TrackType getTrackType() const override{
return TrackVideo;
}
CodecId getCodecId() const override{
return CodecH264;
}
......@@ -66,13 +62,13 @@ public:
* @param ui32Ssrc ssrc
* @param ui32MtuSize mtu大小
* @param ui32SampleRate 采样率,强制为90000
* @param ui8PlayloadType pt类型
* @param ui8PayloadType pt类型
* @param ui8Interleaved rtsp interleaved
*/
H264RtpEncoder(uint32_t ui32Ssrc,
uint32_t ui32MtuSize = 1400,
uint32_t ui32SampleRate = 90000,
uint8_t ui8PlayloadType = 96,
uint8_t ui8PayloadType = 96,
uint8_t ui8Interleaved = TrackVideo * 2);
~H264RtpEncoder() {}
......
......@@ -23,9 +23,9 @@ namespace mediakit {
bool getHEVCInfo(const string &strVps, const string &strSps, int &iVideoWidth, int &iVideoHeight, float &iVideoFps);
/**
* 265帧类
*/
class H265Frame : public Frame {
* 265帧类
*/
class H265Frame : public FrameImp {
public:
typedef std::shared_ptr<H265Frame> Ptr;
......@@ -60,32 +60,8 @@ public:
NAL_SEI_SUFFIX = 40,
} NaleType;
char *data() const override {
return (char *) _buffer.data();
}
uint32_t size() const override {
return _buffer.size();
}
uint32_t dts() const override {
return _dts;
}
uint32_t pts() const override {
return _pts ? _pts : _dts;
}
uint32_t prefixSize() const override {
return _prefix_size;
}
TrackType getTrackType() const override {
return TrackVideo;
}
CodecId getCodecId() const override {
return CodecH265;
H265Frame(){
_codecid = CodecH265;
}
bool keyFrame() const override {
......@@ -96,39 +72,26 @@ public:
switch(H265_TYPE(_buffer[_prefix_size])){
case H265Frame::NAL_VPS:
case H265Frame::NAL_SPS:
case H265Frame::NAL_PPS:
return true;
default:
return false;
case H265Frame::NAL_PPS : return true;
default : return false;
}
}
static bool isKeyFrame(int type) {
return type >= NAL_BLA_W_LP && type <= NAL_RSV_IRAP_VCL23;
}
public:
uint32_t _dts = 0;
uint32_t _pts = 0;
uint32_t _prefix_size = 4;
string _buffer;
};
class H265FrameNoCacheAble : public FrameNoCacheAble {
class H265FrameNoCacheAble : public FrameFromPtr {
public:
typedef std::shared_ptr<H265FrameNoCacheAble> Ptr;
H265FrameNoCacheAble(char *ptr, uint32_t size, uint32_t dts,uint32_t pts, int prefixeSize = 4) {
H265FrameNoCacheAble(char *ptr, uint32_t size, uint32_t dts,uint32_t pts, int prefix_size = 4) {
_ptr = ptr;
_size = size;
_dts = dts;
_pts = pts;
_prefixSize = prefixeSize;
}
TrackType getTrackType() const override {
return TrackVideo;
_prefix_size = prefix_size;
}
CodecId getCodecId() const override {
......@@ -136,17 +99,15 @@ public:
}
bool keyFrame() const override {
return H265Frame::isKeyFrame(H265_TYPE(((uint8_t *) _ptr)[_prefixSize]));
return H265Frame::isKeyFrame(H265_TYPE(((uint8_t *) _ptr)[_prefix_size]));
}
bool configFrame() const override{
switch(H265_TYPE(((uint8_t *) _ptr)[_prefixSize])){
switch(H265_TYPE(((uint8_t *) _ptr)[_prefix_size])){
case H265Frame::NAL_VPS:
case H265Frame::NAL_SPS:
case H265Frame::NAL_PPS:
return true;
default:
return false;
case H265Frame::NAL_PPS:return true;
default:return false;
}
}
};
......@@ -184,7 +145,6 @@ public:
/**
* 返回不带0x00 00 00 01头的vps
* @return
*/
const string &getVps() const {
return _vps;
......@@ -192,7 +152,6 @@ public:
/**
* 返回不带0x00 00 00 01头的sps
* @return
*/
const string &getSps() const {
return _sps;
......@@ -200,7 +159,6 @@ public:
/**
* 返回不带0x00 00 00 01头的pps
* @return
*/
const string &getPps() const {
return _pps;
......@@ -212,7 +170,6 @@ public:
/**
* 返回视频高度
* @return
*/
int getVideoHeight() const override{
return _height ;
......@@ -220,7 +177,6 @@ public:
/**
* 返回视频宽度
* @return
*/
int getVideoWidth() const override{
return _width;
......@@ -228,7 +184,6 @@ public:
/**
* 返回视频fps
* @return
*/
float getVideoFps() const override{
return _fps;
......@@ -238,36 +193,20 @@ public:
return !_vps.empty() && !_sps.empty() && !_pps.empty();
}
/**
* 输入数据帧,并获取sps pps
* @param frame 数据帧
*/
* 输入数据帧,并获取sps pps
* @param frame 数据帧
*/
void inputFrame(const Frame::Ptr &frame) override{
int type = H265_TYPE(*((uint8_t *)frame->data() + frame->prefixSize()));
if(frame->configFrame()){
bool first_frame = true;
splitH264(frame->data() + frame->prefixSize(),
frame->size() - frame->prefixSize(),
[&](const char *ptr, int len){
if(first_frame){
H265FrameInternal::Ptr sub_frame = std::make_shared<H265FrameInternal>(frame,
frame->data(),
len + frame->prefixSize(),
frame->prefixSize());
inputFrame_l(sub_frame);
first_frame = false;
}else{
H265FrameInternal::Ptr sub_frame = std::make_shared<H265FrameInternal>(frame,
(char *)ptr,
len ,
3);
inputFrame_l(sub_frame);
}
});
}else{
inputFrame_l(frame);
}
if(frame->configFrame() || type == H265Frame::NAL_SEI_PREFIX){
splitH264(frame->data(), frame->size(), frame->prefixSize(), [&](const char *ptr, int len, int prefix){
H265FrameInternal::Ptr sub_frame = std::make_shared<H265FrameInternal>(frame, (char*)ptr, len, prefix);
inputFrame_l(sub_frame);
});
} else {
inputFrame_l(frame);
}
}
private:
......@@ -367,47 +306,41 @@ private:
bool _last_frame_is_idr = false;
};
/**
* h265类型sdp
*/
class H265Sdp : public Sdp {
public:
/**
*
* 构造函数
* @param sps 265 sps,不带0x00000001头
* @param pps 265 pps,不带0x00000001头
* @param playload_type rtp playload type 默认96
* @param payload_type rtp payload type 默认96
* @param bitrate 比特率
*/
H265Sdp(const string &strVPS,
const string &strSPS,
const string &strPPS,
int playload_type = 96,
int bitrate = 4000) : Sdp(90000,playload_type) {
int payload_type = 96,
int bitrate = 4000) : Sdp(90000,payload_type) {
//视频通道
_printer << "m=video 0 RTP/AVP " << playload_type << "\r\n";
_printer << "m=video 0 RTP/AVP " << payload_type << "\r\n";
_printer << "b=AS:" << bitrate << "\r\n";
_printer << "a=rtpmap:" << playload_type << " H265/" << 90000 << "\r\n";
_printer << "a=fmtp:" << playload_type << " ";
_printer << "a=rtpmap:" << payload_type << " H265/" << 90000 << "\r\n";
_printer << "a=fmtp:" << payload_type << " ";
_printer << "sprop-vps=";
_printer << encodeBase64(strVPS) << "; ";
_printer << "sprop-sps=";
_printer << encodeBase64(strSPS) << "; ";
_printer << "sprop-pps=";
_printer << encodeBase64(strPPS) << "\r\n";
_printer << "a=control:trackID=" << getTrackType() << "\r\n";
_printer << "a=control:trackID=" << (int)TrackVideo << "\r\n";
}
string getSdp() const override {
return _printer;
}
TrackType getTrackType() const override {
return TrackVideo;
}
CodecId getCodecId() const override {
return CodecH265;
}
......@@ -415,9 +348,5 @@ private:
_StrPrinter _printer;
};
}//namespace mediakit
#endif //ZLMEDIAKIT_H265_H
#endif //ZLMEDIAKIT_H265_H
\ No newline at end of file
......@@ -36,10 +36,6 @@ public:
*/
bool inputRtmp(const RtmpPacket::Ptr &rtmp, bool key_pos = true) override;
TrackType getTrackType() const override{
return TrackVideo;
}
CodecId getCodecId() const override{
return CodecH265;
}
......
......@@ -96,7 +96,7 @@ bool H265RtpDecoder::decodeRtp(const RtpPacket::Ptr &rtppack) {
if (rtppack->sequence != _lastSeq + 1 && rtppack->sequence != 0) {
//中间的或末尾的rtp包,其seq必须连续(如果回环了则判定为连续),否则说明rtp丢包,那么该帧不完整,必须得丢弃
_h265frame->_buffer.clear();
WarnL << "rtp sequence不连续: " << rtppack->sequence << " != " << _lastSeq << " + 1,该帧被废弃";
WarnL << "rtp丢包: " << rtppack->sequence << " != " << _lastSeq << " + 1,该帧被废弃";
return false;
}
......@@ -140,12 +140,12 @@ void H265RtpDecoder::onGetH265(const H265Frame::Ptr &frame) {
H265RtpEncoder::H265RtpEncoder(uint32_t ui32Ssrc,
uint32_t ui32MtuSize,
uint32_t ui32SampleRate,
uint8_t ui8PlayloadType,
uint8_t ui8PayloadType,
uint8_t ui8Interleaved) :
RtpInfo(ui32Ssrc,
ui32MtuSize,
ui32SampleRate,
ui8PlayloadType,
ui8PayloadType,
ui8Interleaved) {
}
......
......@@ -39,10 +39,6 @@ public:
*/
bool inputRtp(const RtpPacket::Ptr &rtp, bool key_pos = true) override;
TrackType getTrackType() const override{
return TrackVideo;
}
CodecId getCodecId() const override{
return CodecH265;
}
......@@ -67,13 +63,13 @@ public:
* @param ui32Ssrc ssrc
* @param ui32MtuSize mtu大小
* @param ui32SampleRate 采样率,强制为90000
* @param ui8PlayloadType pt类型
* @param ui8PayloadType pt类型
* @param ui8Interleaved rtsp interleaved
*/
H265RtpEncoder(uint32_t ui32Ssrc,
uint32_t ui32MtuSize = 1400,
uint32_t ui32SampleRate = 90000,
uint8_t ui8PlayloadType = 96,
uint8_t ui8PayloadType = 96,
uint8_t ui8Interleaved = TrackVideo * 2);
~H265RtpEncoder() {}
......
/*
* Copyright (c) 2016 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#include "Opus.h"
namespace mediakit{
/**
 * Build the SDP description for this opus track.
 * @return an OpusSdp built from this track's sample rate and channel
 *         count, or nullptr if the track is not ready yet
 */
Sdp::Ptr OpusTrack::getSdp() {
    if(!ready()){
        WarnL << getCodecName() << " Track未准备好";
        return nullptr;
    }
    return std::make_shared<OpusSdp>(getAudioSampleRate(), getAudioChannel());
}
}//namespace mediakit
\ No newline at end of file
/*
* Copyright (c) 2016 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#ifndef ZLMEDIAKIT_OPUS_H
#define ZLMEDIAKIT_OPUS_H
#include "Frame.h"
#include "Track.h"
namespace mediakit{
/**
 * Opus audio frame (data owned by the FrameImp base)
 */
class OpusFrame : public FrameImp {
public:
    typedef std::shared_ptr<OpusFrame> Ptr;

    OpusFrame(){
        //tag the frame with the opus codec id so generic code can dispatch on it
        _codecid = CodecOpus;
    }
};
/**
* 不可缓存的Opus帧
*/
class OpusFrameNoCacheAble : public FrameFromPtr {
public:
typedef std::shared_ptr<OpusFrameNoCacheAble> Ptr;
OpusFrameNoCacheAble(char *ptr,uint32_t size,uint32_t dts, uint32_t pts = 0,int prefix_size = 0){
_ptr = ptr;
_size = size;
_dts = dts;
_prefix_size = prefix_size;
}
CodecId getCodecId() const override{
return CodecOpus;
}
bool keyFrame() const override {
return false;
}
bool configFrame() const override{
return false;
}
};
/**
 * Opus audio track; all state lives in the AudioTrackImp base
 */
class OpusTrack : public AudioTrackImp{
public:
    typedef std::shared_ptr<OpusTrack> Ptr;

    OpusTrack(int sample_rate, int channels, int sample_bit) : AudioTrackImp(CodecOpus,sample_rate,channels,sample_bit){}

private:
    //Clone this track; copy construction suffices since this class adds no members
    Track::Ptr clone() override {
        return std::make_shared<std::remove_reference<decltype(*this)>::type >(*this);
    }

    //Build the SDP description (implemented in Opus.cpp)
    Sdp::Ptr getSdp() override ;
};
/**
 * Opus SDP description
 */
class OpusSdp : public Sdp {
public:
    /**
     * Build an opus sdp
     * @param sample_rate audio sample rate
     * @param channels channel count
     * @param payload_type rtp payload type
     * @param bitrate bitrate
     *        NOTE(review): accepted but unused — no "b=AS:" line is emitted,
     *        unlike the video Sdp classes; confirm whether that is intended
     */
    OpusSdp(int sample_rate,
            int channels,
            int payload_type = 98,
            int bitrate = 128) : Sdp(sample_rate,payload_type){
        _printer << "m=audio 0 RTP/AVP " << payload_type << "\r\n";
        _printer << "a=rtpmap:" << payload_type << " opus/" << sample_rate << "/" << channels << "\r\n";
        _printer << "a=control:trackID=" << (int)TrackAudio << "\r\n";
    }

    //Return the full sdp text accumulated in the constructor
    string getSdp() const override {
        return _printer;
    }

    CodecId getCodecId() const override {
        return CodecOpus;
    }

private:
    _StrPrinter _printer;
    CodecId _codecId;
};
}//namespace mediakit
#endif //ZLMEDIAKIT_OPUS_H
......@@ -65,8 +65,6 @@ class VideoTrack : public Track {
public:
typedef std::shared_ptr<VideoTrack> Ptr;
TrackType getTrackType() const override { return TrackVideo;};
/**
* 返回视频高度
* @return
......@@ -93,8 +91,6 @@ class AudioTrack : public Track {
public:
typedef std::shared_ptr<AudioTrack> Ptr;
TrackType getTrackType() const override { return TrackAudio;};
/**
* 返回音频采样率
* @return
......@@ -114,6 +110,64 @@ public:
virtual int getAudioChannel() const {return 0;};
};
/**
 * Generic audio track implementation that simply stores the codec type
 * and PCM parameters supplied at construction time.
 */
class AudioTrackImp : public AudioTrack{
public:
    typedef std::shared_ptr<AudioTrackImp> Ptr;

    /**
     * Constructor
     * @param codecId codec type
     * @param sample_rate sample rate (Hz)
     * @param channels channel count
     * @param sample_bit bits per sample, typically 16
     */
    AudioTrackImp(CodecId codecId,int sample_rate, int channels, int sample_bit){
        _codecid = codecId;
        _sample_rate = sample_rate;
        _channels = channels;
        _sample_bit = sample_bit;
    }

    /**
     * Return the codec type
     */
    CodecId getCodecId() const override{
        return _codecid;
    }

    /**
     * Whether the track is initialized; always true here because every
     * parameter is provided up front in the constructor
     */
    bool ready() override {
        return true;
    }

    /**
     * Return the audio sample rate
     */
    int getAudioSampleRate() const override{
        return _sample_rate;
    }

    /**
     * Return bits per sample, typically 16 or 8
     */
    int getAudioSampleBit() const override{
        return _sample_bit;
    }

    /**
     * Return the audio channel count
     */
    int getAudioChannel() const override{
        return _channels;
    }

private:
    CodecId _codecid;
    int _sample_rate;
    int _channels;
    int _sample_bit;
};
class TrackSource{
public:
......@@ -123,7 +177,6 @@ public:
/**
* 获取全部的Track
* @param trackReady 是否获取全部已经准备好的Track
* @return
*/
virtual vector<Track::Ptr> getTracks(bool trackReady = true) const = 0;
......@@ -131,7 +184,6 @@ public:
* 获取特定Track
* @param type track类型
* @param trackReady 是否获取全部已经准备好的Track
* @return
*/
Track::Ptr getTrack(TrackType type , bool trackReady = true) const {
auto tracks = getTracks(trackReady);
......@@ -145,5 +197,4 @@ public:
};
}//namespace mediakit
#endif //ZLMEDIAKIT_TRACK_H
#endif //ZLMEDIAKIT_TRACK_H
\ No newline at end of file
/*
* Copyright (c) 2020 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#include <cstdlib>
#include "HlsParser.h"
#include "Util/util.h"
#include "Common/Parser.h"
using namespace toolkit;
namespace mediakit {
/**
 * Parse an m3u8 playlist and invoke onParsed() with the collected entries.
 * @param http_url url this playlist was downloaded from; used as the base
 *                 when resolving relative segment urls
 * @param m3u8 raw playlist text
 * @return true if the text is an m3u8 file (#EXTM3U tag present)
 */
bool HlsParser::parse(const string &http_url, const string &m3u8) {
    float extinf_dur = 0;
    ts_segment segment;
    map<int, ts_segment> ts_map;
    _total_dur = 0;
    _is_live = true;
    _is_m3u8_inner = false;
    int index = 0;

    auto lines = split(m3u8, "\n");
    for (auto &line : lines) {
        trim(line);
        if (line.size() < 2) {
            //too short to be a meaningful tag or url
            continue;
        }

        if ((_is_m3u8_inner || extinf_dur != 0) && line[0] != '#') {
            //this non-tag line is the url following #EXTINF or #EXT-X-STREAM-INF
            segment.duration = extinf_dur;
            if (line.find("http://") == 0 || line.find("https://") == 0) {
                //already an absolute url
                segment.url = line;
            } else {
                if (line.find("/") == 0) {
                    //host-relative: keep the scheme://host part of http_url
                    //(find("/", 8) skips past the "http(s)://" prefix)
                    segment.url = http_url.substr(0, http_url.find("/", 8)) + line;
                } else {
                    //document-relative: replace everything after the last '/'
                    segment.url = http_url.substr(0, http_url.rfind("/") + 1) + line;
                }
            }
            if (!_is_m3u8_inner) {
                //ts segments keep their playlist order
                ts_map.emplace(index++, segment);
            } else {
                //nested m3u8 entries are keyed (sorted) by bandwidth
                ts_map.emplace(segment.bandwidth, segment);
            }
            extinf_dur = 0;
            continue;
        }

        _is_m3u8_inner = false;
        if (line.find("#EXTINF:") == 0) {
            //segment duration in seconds; accumulated into the total duration
            sscanf(line.data(), "#EXTINF:%f,", &extinf_dur);
            _total_dur += extinf_dur;
            continue;
        }
        static const string s_stream_inf = "#EXT-X-STREAM-INF:";
        if (line.find(s_stream_inf) == 0) {
            //variant-playlist entry: the next non-tag line is a nested m3u8 url
            _is_m3u8_inner = true;
            auto key_val = Parser::parseArgs(line.substr(s_stream_inf.size()), ",", "=");
            segment.program_id = atoi(key_val["PROGRAM-ID"].data());
            segment.bandwidth = atoi(key_val["BANDWIDTH"].data());
            sscanf(key_val["RESOLUTION"].data(), "%dx%d", &segment.width, &segment.height);
            continue;
        }

        if (line == "#EXTM3U") {
            _is_m3u8 = true;
            continue;
        }

        if (line.find("#EXT-X-ALLOW-CACHE:") == 0) {
            _allow_cache = (line.find(":YES") != string::npos);
            continue;
        }

        if (line.find("#EXT-X-VERSION:") == 0) {
            sscanf(line.data(), "#EXT-X-VERSION:%d", &_version);
            continue;
        }

        if (line.find("#EXT-X-TARGETDURATION:") == 0) {
            sscanf(line.data(), "#EXT-X-TARGETDURATION:%d", &_target_dur);
            continue;
        }

        if (line.find("#EXT-X-MEDIA-SEQUENCE:") == 0) {
            //NOTE(review): %lld expects long long; int64_t matches it on mainstream
            //platforms but that equivalence is not guaranteed — confirm
            sscanf(line.data(), "#EXT-X-MEDIA-SEQUENCE:%lld", &_sequence);
            continue;
        }

        if (line.find("#EXT-X-ENDLIST") == 0) {
            //end of list present: this is vod, not live
            _is_live = false;
            continue;
        }
        continue;
    }

    if (_is_m3u8) {
        onParsed(_is_m3u8_inner, _sequence, ts_map);
    }
    return _is_m3u8;
}
//Whether #EXTM3U was present
bool HlsParser::isM3u8() const {
    return _is_m3u8;
}

//Whether #EXT-X-ENDLIST was absent (i.e. a live stream)
bool HlsParser::isLive() const{
    return _is_live;
}

//Value of #EXT-X-ALLOW-CACHE
bool HlsParser::allowCache() const {
    return _allow_cache;
}

//Value of #EXT-X-VERSION
int HlsParser::getVersion() const {
    return _version;
}

//Value of #EXT-X-TARGETDURATION
int HlsParser::getTargetDur() const {
    return _target_dur;
}

//Value of #EXT-X-MEDIA-SEQUENCE
//NOTE(review): _sequence is int64_t; returning int truncates very large
//sequence numbers — confirm no caller depends on the full range
int HlsParser::getSequence() const {
    return _sequence;
}

//Whether the playlist contained nested (variant) m3u8 urls
bool HlsParser::isM3u8Inner() const {
    return _is_m3u8_inner;
}
}//namespace mediakit
\ No newline at end of file
/*
* Copyright (c) 2020 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#ifndef HTTP_HLSPARSER_H
#define HTTP_HLSPARSER_H
#include <string>
#include <list>
#include <map>
using namespace std;
namespace mediakit {
//Describes one playlist entry: either a ts media segment or, for variant
//playlists, a nested m3u8 url.
//fix: members are now zero-initialized — previously plain ts entries copied
//indeterminate program_id/bandwidth/width/height values into the segment map.
typedef struct{
    //url of the ts segment or nested m3u8
    string url;
    //ts segment duration in seconds (from #EXTINF)
    float duration = 0;

    //////nested m3u8 (#EXT-X-STREAM-INF) fields only//////
    //program id (PROGRAM-ID attribute)
    int program_id = 0;
    //bandwidth (BANDWIDTH attribute)
    int bandwidth = 0;
    //video width (from RESOLUTION attribute)
    int width = 0;
    //video height (from RESOLUTION attribute)
    int height = 0;
} ts_segment;
class HlsParser {
public:
    HlsParser(){}
    //fix: virtual — HlsParser is a polymorphic base (onParsed is overridden by
    //subclasses such as the hls player), so deleting through a base pointer
    //must run the derived destructor; a non-virtual dtor would be UB there
    virtual ~HlsParser(){}

    /**
     * Parse an m3u8 playlist; onParsed() is invoked with the entries.
     * @param http_url url the playlist was fetched from, used to resolve relative urls
     * @param m3u8 raw playlist text
     * @return true if the text is an m3u8 file (#EXTM3U present)
     */
    bool parse(const string &http_url,const string &m3u8);

    /**
     * Whether the #EXTM3U tag was present, i.e. this is an m3u8 file
     */
    bool isM3u8() const;

    /**
     * Value of #EXT-X-ALLOW-CACHE, whether caching is allowed
     */
    bool allowCache() const;

    /**
     * Whether #EXT-X-ENDLIST was absent, i.e. this is a live stream
     */
    bool isLive() const ;

    /**
     * Value of #EXT-X-VERSION, the playlist version
     */
    int getVersion() const;

    /**
     * Value of #EXT-X-TARGETDURATION
     */
    int getTargetDur() const;

    /**
     * Value of #EXT-X-MEDIA-SEQUENCE, this playlist's sequence number
     * NOTE(review): stored as int64_t but returned as int — truncates very
     * large sequence numbers; confirm callers
     */
    int getSequence() const;

    /**
     * Whether the playlist contains nested (variant) m3u8 urls
     */
    bool isM3u8Inner() const;

protected:
    //Callback delivering the parsed entries.
    //@param is_m3u8_inner true when entries are nested m3u8 urls keyed by
    //       bandwidth; false when they are ts segments keyed by playlist order
    //@param sequence value of #EXT-X-MEDIA-SEQUENCE
    //@param ts_list parsed entries
    virtual void onParsed(bool is_m3u8_inner,int64_t sequence,const map<int,ts_segment> &ts_list) {};

private:
    bool _is_m3u8 = false;
    bool _allow_cache = false;
    bool _is_live = true;
    int _version = 0;
    int _target_dur = 0;
    float _total_dur = 0;
    int64_t _sequence = 0;
    //whether a nested m3u8 url was found
    bool _is_m3u8_inner = false;
};
}//namespace mediakit
#endif //HTTP_HLSPARSER_H
/*
* Copyright (c) 2020 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#include "HlsPlayer.h"
namespace mediakit {
/**
 * Construct the hls player.
 * @param poller event poller to run on; when null, one is taken from the pool
 * NOTE(review): the segment callback captures raw `this`; assumes _segment
 * is a member that never outlives this player — confirm
 */
HlsPlayer::HlsPlayer(const EventPoller::Ptr &poller){
    _segment.setOnSegment([this](const char *data, uint64_t len) { onPacket(data, len); });
    _poller = poller ? poller : EventPollerPool::Instance().getPoller();
}
//nothing to release explicitly; members clean up via their own destructors
HlsPlayer::~HlsPlayer() {}
/**
 * Start playback of an hls url. The url is appended to the candidate list
 * and play_l() downloads the most recently added one.
 * @param strUrl m3u8 url
 */
void HlsPlayer::play(const string &strUrl) {
    _m3u8_list.emplace_back(strUrl);
    play_l();
}
/**
 * Download the most recently added m3u8 url; if no candidate urls remain,
 * tear the player down with a shutdown error.
 */
void HlsPlayer::play_l(){
    if (_m3u8_list.empty()) {
        teardown_l(SockException(Err_shutdown, "所有hls url都尝试播放失败!"));
        return;
    }
    //configured timeout is in milliseconds; sendRequest expects seconds
    float playTimeOutSec = (*this)[Client::kTimeoutMS].as<int>() / 1000.0;
    setMethod("GET");
    if(!(*this)[kNetAdapter].empty()) {
        setNetAdapter((*this)[kNetAdapter]);
    }
    sendRequest(_m3u8_list.back(), playTimeOutSec);
}
void HlsPlayer::teardown_l(const SockException &ex){
    //cancel both timers: m3u8 refresh and ts pacing
    _timer = nullptr;
    _timer_ts = nullptr;
    //drop the ts segment downloader
    _http_ts_player = nullptr;
    //finally close our own http connection
    shutdown(ex);
}
void HlsPlayer::teardown() {
    //public stop entry: tear everything down with a generic reason
    teardown_l(SockException(Err_shutdown,"teardown"));
}
//Download the next ts segment in the pending list; downloads are paced so that
//one segment starts roughly every segment-duration.
void HlsPlayer::playNextTs(bool force){
    if (_ts_list.empty()) {
        //playlist exhausted: re-download the m3u8 file immediately
        _timer.reset();
        play_l();
        return;
    }
    if (!force && _http_ts_player && _http_ts_player->alive()) {
        //the current ts downloader is still alive, a segment is being fetched
        return;
    }
    //segment duration in milliseconds
    auto ts_duration = _ts_list.front().duration * 1000;
    //capture only a weak pointer so the callbacks never prolong our lifetime
    weak_ptr<HlsPlayer> weakSelf = dynamic_pointer_cast<HlsPlayer>(shared_from_this());
    std::shared_ptr<Ticker> ticker(new Ticker);
    //split_ts = false: the segment bytes are forwarded raw, we re-split them ourselves
    _http_ts_player = std::make_shared<HttpTSPlayer>(getPoller(), false);
    _http_ts_player->setOnDisconnect([weakSelf, ticker, ts_duration](const SockException &err) {
        auto strongSelf = weakSelf.lock();
        if (!strongSelf) {
            return;
        }
        //time budget left before the next segment should start (0.5s safety margin)
        auto delay = ts_duration - 500 - ticker->elapsedTime();
        if (delay <= 0) {
            //downloading this segment took too long: start the next one immediately
            strongSelf->playNextTs(true);
        } else {
            //start the next segment after the remaining budget elapses
            strongSelf->_timer_ts.reset(new Timer(delay / 1000.0, [weakSelf, delay]() {
                auto strongSelf = weakSelf.lock();
                if (!strongSelf) {
                    return false;
                }
                strongSelf->playNextTs(true);
                //one-shot timer
                return false;
            }, strongSelf->getPoller()));
        }
    });
    _http_ts_player->setOnPacket([weakSelf](const char *data, uint64_t len) {
        auto strongSelf = weakSelf.lock();
        if (!strongSelf) {
            return;
        }
        //raw segment bytes received
        strongSelf->onPacket_l(data, len);
    });
    _http_ts_player->setMethod("GET");
    //propagate the network-adapter binding to the segment downloader
    if(!(*this)[kNetAdapter].empty()) {
        _http_ts_player->setNetAdapter((*this)[Client::kNetAdapter]);
    }
    //allow up to twice the nominal segment duration for the download
    _http_ts_player->sendRequest(_ts_list.front().url, 2 * _ts_list.front().duration);
    _ts_list.pop_front();
}
//HlsParser callback: a playlist was parsed successfully.
void HlsPlayer::onParsed(bool is_m3u8_inner,int64_t sequence,const map<int,ts_segment> &ts_map){
    if (!is_m3u8_inner) {
        //this is a ts media playlist
        if (_last_sequence == sequence) {
            //same #EXT-X-MEDIA-SEQUENCE as last refresh: nothing new
            return;
        }
        _last_sequence = sequence;
        for (auto &pr : ts_map) {
            auto &ts = pr.second;
            if (_ts_url_cache.emplace(ts.url).second) {
                //segment url not seen before
                _ts_list.emplace_back(ts);
                //remember arrival order for cache eviction
                _ts_url_sort.emplace_back(ts.url);
            }
        }
        if (_ts_url_sort.size() > 2 * ts_map.size()) {
            //shrink the dedup window; NOTE(review): evicts at most one entry per
            //refresh — presumably enough since few segments are added each time
            _ts_url_cache.erase(_ts_url_sort.front());
            _ts_url_sort.pop_front();
        }
        playNextTs();
    } else {
        //this is a master playlist: play the highest-quality sub-hls
        if (ts_map.empty()) {
            teardown_l(SockException(Err_shutdown, StrPrinter << "empty sub hls list:" + getUrl()));
            return;
        }
        _timer.reset();
        weak_ptr<HlsPlayer> weakSelf = dynamic_pointer_cast<HlsPlayer>(shared_from_this());
        //last entry in the map — presumably the highest bitrate variant
        auto url = ts_map.rbegin()->second.url;
        //defer to the poller so we do not re-enter play() from inside the parser
        getPoller()->async([weakSelf, url]() {
            auto strongSelf = weakSelf.lock();
            if (strongSelf) {
                strongSelf->play(url);
            }
        }, false);
    }
}
int64_t HlsPlayer::onResponseHeader(const string &status, const HttpClient::HttpHeader &headers) {
    //anything other than 200/206 is fatal
    bool status_ok = (status == "200" || status == "206");
    if (!status_ok) {
        teardown_l(SockException(Err_shutdown, StrPrinter << "bad http status code:" + status));
        return 0;
    }
    //decide from Content-Type whether the body is an m3u8 playlist
    const auto &content_type = const_cast<HttpClient::HttpHeader &>(headers)["Content-Type"];
    _is_m3u8 = (content_type.find("application/vnd.apple.mpegurl") == 0);
    //-1: content length unknown, read until the connection closes
    return -1;
}
void HlsPlayer::onResponseBody(const char *buf, int64_t size, int64_t recvedSize, int64_t totalSize) {
    //recvedSize includes this chunk, so equality means this is the first chunk
    bool first_chunk = (recvedSize == size);
    if (first_chunk) {
        _m3u8.clear();
    }
    //accumulate the playlist text until onResponseCompleted fires
    _m3u8.append(buf, size);
}
void HlsPlayer::onResponseCompleted() {
    //the whole m3u8 body has arrived; try to parse it
    if (!HlsParser::parse(getUrl(), _m3u8)) {
        teardown_l(SockException(Err_shutdown, "解析m3u8文件失败"));
        return;
    }
    //schedule the next playlist refresh
    playDelay();
    if (_first) {
        //report success exactly once, on the first successfully parsed playlist
        _first = false;
        onPlayResult(SockException(Err_success, "play success"));
    }
}
float HlsPlayer::delaySecond(){
    //refresh interval: use #EXT-X-TARGETDURATION when available,
    //otherwise fall back to one second
    auto target_dur = HlsParser::getTargetDur();
    if (HlsParser::isM3u8() && target_dur > 0) {
        return target_dur;
    }
    return 1;
}
//The http connection carrying the m3u8 was closed.
void HlsPlayer::onDisconnect(const SockException &ex) {
    if (_first) {
        //very first request failed: report play failure
        _first = false;
        onPlayResult(ex);
        return;
    }
    //Err_shutdown means the close was triggered by ourselves (teardown_l)
    if (ex.getErrCode() == Err_shutdown) {
        if (_m3u8_list.size() <= 1) {
            //no fallback url left: every url failed
            onShutdown(ex);
        } else {
            _m3u8_list.pop_back();
            //retry the previous (parent) url
            play_l();
        }
        return;
    }
    //eof or transient error: retry the m3u8 download after a delay
    playDelay();
}
bool HlsPlayer::onRedirectUrl(const string &url, bool temporary) {
    //record the redirect target so failure handling can fall back level by level
    _m3u8_list.push_back(url);
    //always follow the redirect
    return true;
}
void HlsPlayer::playDelay(){
weak_ptr<HlsPlayer> weakSelf = dynamic_pointer_cast<HlsPlayer>(shared_from_this());
_timer.reset(new Timer(delaySecond(), [weakSelf]() {
auto strongSelf = weakSelf.lock();
if (strongSelf) {
strongSelf->play_l();
}
return false;
}, getPoller()));
}
void HlsPlayer::onPacket_l(const char *data, uint64_t len){
    //feed downloaded segment bytes into the ts splitter; complete ts packets
    //come back through the onSegment callback installed in the constructor
    _segment.input(data,len);
}
//////////////////////////////////////////////////////////////////////////
//Pass the poller through to the underlying HlsPlayer/PlayerImp chain
HlsPlayerImp::HlsPlayerImp(const EventPoller::Ptr &poller) : PlayerImp<HlsPlayer, PlayerBase>(poller) {
}
void HlsPlayerImp::setOnPacket(const TSSegment::onSegment &cb){
    //store the user callback invoked for every raw ts packet
    _on_ts = cb;
}
void HlsPlayerImp::onPacket(const char *data,uint64_t len) {
    //forward the raw ts packet to the user callback, if registered
    if (_on_ts) {
        _on_ts(data, len);
    }
    //lazily create the ts demuxer on the first packet
    if (!_decoder) {
        _decoder = DecoderImp::createDecoder(DecoderImp::decoder_ts, this);
    }
    //createDecoder may fail, so check before feeding data
    if (_decoder) {
        _decoder->input((uint8_t *) data, len);
    }
}
void HlsPlayerImp::onAllTrackReady() {
    //the ts demuxer discovered all tracks: now report play success upward
    PlayerImp<HlsPlayer, PlayerBase>::onPlayResult(SockException(Err_success,"play hls success"));
}
void HlsPlayerImp::onPlayResult(const SockException &ex) {
    if(ex){
        //failure: forward immediately
        PlayerImp<HlsPlayer, PlayerBase>::onPlayResult(ex);
    }else{
        //success path: align audio timestamps to video and restart the play clock;
        //the actual success event is reported from onAllTrackReady()
        _stamp[TrackAudio].syncTo(_stamp[TrackVideo]);
        _ticker.resetTime();
        weak_ptr<HlsPlayerImp> weakSelf = dynamic_pointer_cast<HlsPlayerImp>(shared_from_this());
        //tick every 50 milliseconds to consume frames whose timestamp is due
        _timer = std::make_shared<Timer>(0.05, [weakSelf]() {
            auto strongSelf = weakSelf.lock();
            if (!strongSelf) {
                //player destroyed: cancel the timer
                return false;
            }
            strongSelf->onTick();
            //keep repeating
            return true;
        }, getPoller());
    }
}
void HlsPlayerImp::onShutdown(const SockException &ex) {
    //let the base class report the shutdown first, then stop the tick timer
    PlayerImp<HlsPlayer, PlayerBase>::onShutdown(ex);
    _timer.reset();
}
vector<Track::Ptr> HlsPlayerImp::getTracks(bool trackReady) const {
    //tracks originate from the ts demuxer, which feeds this MediaSink
    return MediaSink::getTracks(trackReady);
}
//Buffer demuxed frames ordered by relative dts, so onTick() can pace delivery.
void HlsPlayerImp::inputFrame(const Frame::Ptr &frame) {
    //compute the relative timestamp
    int64_t dts, pts;
    _stamp[frame->getTrackType()].revise(frame->dts(), frame->pts(), dts, pts);
    //cache the frame keyed (and ordered) by its dts
    _frame_cache.emplace(dts, Frame::getCacheAbleFrame(frame));
    while (!_frame_cache.empty()) {
        if (_frame_cache.rbegin()->first - _frame_cache.begin()->first > 30 * 1000) {
            //more than 30 seconds buffered: force-consume the oldest frame
            MediaSink::inputFrame(_frame_cache.begin()->second);
            _frame_cache.erase(_frame_cache.begin());
            continue;
        }
        //buffer span is within 30 seconds; leave the rest for onTick()
        break;
    }
}
//Timer callback (every 50ms): consume cached frames whose timestamp is due.
void HlsPlayerImp::onTick() {
    auto it = _frame_cache.begin();
    while (it != _frame_cache.end()) {
        if (it->first > _ticker.elapsedTime()) {
            //this frame (and everything after it) is not due yet
            break;
        }
        //deliver the due frame downstream
        MediaSink::inputFrame(it->second);
        it = _frame_cache.erase(it);
    }
}
}//namespace mediakit
\ No newline at end of file
/*
* Copyright (c) 2020 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#ifndef HTTP_HLSPLAYER_H
#define HTTP_HLSPLAYER_H
#include <unordered_set>
#include "Util/util.h"
#include "Poller/Timer.h"
#include "Http/HttpDownloader.h"
#include "Player/MediaPlayer.h"
#include "HlsParser.h"
#include "HttpTSPlayer.h"
#include "Rtp/Decoder.h"
#include "Rtp/TSDecoder.h"
using namespace toolkit;
namespace mediakit {
class HlsPlayer : public HttpClientImp , public PlayerBase , public HlsParser{
public:
    HlsPlayer(const EventPoller::Ptr &poller);
    ~HlsPlayer() override;

    /**
     * Start playing an hls stream
     * @param strUrl m3u8 url
     */
    void play(const string &strUrl) override;

    /**
     * Stop playing
     */
    void teardown() override;

protected:
    /**
     * Called for every received ts packet
     * @param data ts payload
     * @param len ts packet length
     */
    virtual void onPacket(const char *data, uint64_t len) = 0;

private:
    /**
     * m3u8 parsed successfully
     * @param is_m3u8_inner whether this is a master playlist of sub-m3u8 urls
     * @param sequence sequence number of the ts list
     * @param ts_map ts segment list, or sub-m3u8 list when is_m3u8_inner
     */
    void onParsed(bool is_m3u8_inner, int64_t sequence, const map<int, ts_segment> &ts_map) override;

    /**
     * http response header received
     * @param status status code, e.g. "200"
     * @param headers http headers
     * @return length of the following content; -1: all following data is content;
     *         >=0: fixed-length content. Ignored when Content-Length is present.
     */
    int64_t onResponseHeader(const string &status, const HttpHeader &headers) override;

    /**
     * http content data received
     * @param buf data pointer
     * @param size data size
     * @param recvedSize bytes received so far (including this chunk); when equal
     *        to totalSize, onResponseCompleted is triggered
     * @param totalSize total body size
     */
    void onResponseBody(const char *buf, int64_t size, int64_t recvedSize, int64_t totalSize) override;

    /**
     * http response fully received
     */
    void onResponseCompleted() override;

    /**
     * http connection closed
     * @param ex close reason
     */
    void onDisconnect(const SockException &ex) override;

    /**
     * redirect event
     * @param url redirect target url
     * @param temporary whether it is a temporary redirect
     * @return whether to follow the redirect
     */
    bool onRedirectUrl(const string &url, bool temporary) override;

private:
    void playDelay();
    float delaySecond();
    void playNextTs(bool force = false);
    void teardown_l(const SockException &ex);
    void play_l();
    void onPacket_l(const char *data, uint64_t len);

private:
    struct UrlComp {
        //compare urls ignoring everything after '?'
        //(parameters renamed from "__x"/"__y": identifiers containing a double
        //underscore are reserved to the implementation by the C++ standard)
        bool operator()(const string &lhs, const string &rhs) const {
            return split(lhs, "?")[0] < split(rhs, "?")[0];
        }
    };

private:
    bool _is_m3u8 = false;              //Content-Type indicated an m3u8 body
    bool _first = true;                 //play result not yet reported
    int64_t _last_sequence = -1;        //last processed #EXT-X-MEDIA-SEQUENCE
    string _m3u8;                       //accumulated m3u8 body
    Timer::Ptr _timer;                  //m3u8 refresh timer
    Timer::Ptr _timer_ts;               //ts download pacing timer
    list<ts_segment> _ts_list;          //pending ts segments
    list<string> _ts_url_sort;          //ts urls in arrival order (dedup window)
    list<string> _m3u8_list;            //url fallback stack (redirects/sub-m3u8)
    set<string, UrlComp> _ts_url_cache; //already-seen ts urls
    HttpTSPlayer::Ptr _http_ts_player;  //current ts segment downloader
    TSSegment _segment;                 //ts packet splitter
};
//Concrete hls player: demuxes the ts stream and paces frame delivery.
class HlsPlayerImp : public PlayerImp<HlsPlayer, PlayerBase> , public MediaSink{
public:
    typedef std::shared_ptr<HlsPlayerImp> Ptr;
    HlsPlayerImp(const EventPoller::Ptr &poller = nullptr);
    ~HlsPlayerImp() override {};

    //register a callback that receives every raw ts packet
    void setOnPacket(const TSSegment::onSegment &cb);

private:
    //HlsPlayer override: raw ts packet -> user callback + ts demuxer
    void onPacket(const char *data, uint64_t len) override;
    //MediaSink override: all tracks known -> report play success
    void onAllTrackReady() override;
    //on success, starts the 50ms frame-consuming tick timer
    void onPlayResult(const SockException &ex) override;
    vector<Track::Ptr> getTracks(bool trackReady = true) const override;
    //buffer demuxed frames by relative dts for paced playback
    void inputFrame(const Frame::Ptr &frame) override;
    void onShutdown(const SockException &ex) override;
    //consume frames whose timestamp has come due
    void onTick();

private:
    TSSegment::onSegment _on_ts;                //user ts callback
    DecoderImp::Ptr _decoder;                   //ts demuxer
    multimap<int64_t, Frame::Ptr> _frame_cache; //frames ordered by dts
    Timer::Ptr _timer;                          //50ms tick timer
    Ticker _ticker;                             //elapsed play time
    Stamp _stamp[2];                            //audio/video timestamp aligners
};
}//namespace mediakit
#endif //HTTP_HLSPLAYER_H
......@@ -48,8 +48,7 @@ public:
}
};
class HttpClient : public TcpClient , public HttpRequestSplitter
{
class HttpClient : public TcpClient , public HttpRequestSplitter{
public:
typedef StrCaseMap HttpHeader;
typedef std::shared_ptr<HttpClient> Ptr;
......
......@@ -13,9 +13,7 @@
#include "HttpClient.h"
#include "Util/SSLBox.h"
using namespace toolkit;
namespace mediakit {
class HttpClientImp: public TcpClientWithSSL<HttpClient> {
......@@ -28,5 +26,4 @@ protected:
};
} /* namespace mediakit */
#endif /* SRC_HTTP_HTTPCLIENTIMP_H_ */
......@@ -188,65 +188,65 @@ bool HttpSession::checkLiveFlvStream(const function<void()> &cb){
bool bClose = !strcasecmp(_parser["Connection"].data(),"close");
weak_ptr<HttpSession> weakSelf = dynamic_pointer_cast<HttpSession>(shared_from_this());
MediaSource::findAsync(_mediaInfo,weakSelf.lock(),[weakSelf,bClose,this,cb](const MediaSource::Ptr &src){
//鉴权结果回调
auto onRes = [cb, weakSelf, bClose](const string &err){
auto strongSelf = weakSelf.lock();
if(!strongSelf){
if (!strongSelf) {
//本对象已经销毁
return;
}
auto rtmp_src = dynamic_pointer_cast<RtmpMediaSource>(src);
if(!rtmp_src){
//未找到该流
sendNotFound(bClose);
if(!err.empty()){
//播放鉴权失败
strongSelf->sendResponse("401 Unauthorized", bClose, nullptr, KeyValue(), std::make_shared<HttpStringBody>(err));
return;
}
//找到流了
auto onRes = [this,rtmp_src,cb](const string &err){
bool authSuccess = err.empty();
if(!authSuccess){
sendResponse("401 Unauthorized", true, nullptr, KeyValue(), std::make_shared<HttpStringBody>(err));
return ;
//异步查找rtmp流
MediaSource::findAsync(strongSelf->_mediaInfo, strongSelf, [weakSelf, bClose, cb](const MediaSource::Ptr &src) {
auto strongSelf = weakSelf.lock();
if (!strongSelf) {
//本对象已经销毁
return;
}
auto rtmp_src = dynamic_pointer_cast<RtmpMediaSource>(src);
if (!rtmp_src) {
//未找到该流
strongSelf->sendNotFound(bClose);
return;
}
if(!cb) {
if (!cb) {
//找到rtmp源,发送http头,负载后续发送
sendResponse("200 OK", false, "video/x-flv",KeyValue(),nullptr,true);
}else{
strongSelf->sendResponse("200 OK", false, "video/x-flv", KeyValue(), nullptr, true);
} else {
//自定义发送http头
cb();
}
//http-flv直播牺牲延时提升发送性能
setSocketFlags();
try{
start(getPoller(),rtmp_src);
_is_flv_stream = true;
}catch (std::exception &ex){
//该rtmp源不存在
shutdown(SockException(Err_shutdown,"rtmp mediasource released"));
}
};
strongSelf->setSocketFlags();
strongSelf->start(strongSelf->getPoller(), rtmp_src);
strongSelf->_is_flv_stream = true;
});
};
weak_ptr<HttpSession> weakSelf = dynamic_pointer_cast<HttpSession>(shared_from_this());
Broadcast::AuthInvoker invoker = [weakSelf,onRes](const string &err){
auto strongSelf = weakSelf.lock();
if(!strongSelf){
return;
}
strongSelf->async([weakSelf,onRes,err](){
auto strongSelf = weakSelf.lock();
if(!strongSelf){
return;
}
onRes(err);
});
};
auto flag = NoticeCenter::Instance().emitEvent(Broadcast::kBroadcastMediaPlayed,_mediaInfo,invoker,static_cast<SockInfo &>(*this));
if(!flag){
//该事件无人监听,默认不鉴权
onRes("");
Broadcast::AuthInvoker invoker = [weakSelf, onRes](const string &err) {
auto strongSelf = weakSelf.lock();
if (!strongSelf) {
return;
}
});
strongSelf->async([onRes, err]() {
onRes(err);
});
};
auto flag = NoticeCenter::Instance().emitEvent(Broadcast::kBroadcastMediaPlayed, _mediaInfo, invoker, static_cast<SockInfo &>(*this));
if (!flag) {
//该事件无人监听,默认不鉴权
onRes("");
}
return true;
}
......
/*
* Copyright (c) 2020 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#include "HttpTSPlayer.h"
namespace mediakit {
HttpTSPlayer::HttpTSPlayer(const EventPoller::Ptr &poller, bool split_ts) {
    //remember whether the http body should be re-split into ts packets
    _split_ts = split_ts;
    //fall back to a poller from the shared pool when none is supplied
    _poller = poller ? poller : EventPollerPool::Instance().getPoller();
    //forward every complete ts packet produced by the splitter
    _segment.setOnSegment([this](const char *buf, uint64_t size) { onPacket(buf, size); });
}
//members are RAII-managed; nothing to release explicitly
HttpTSPlayer::~HttpTSPlayer() {}
int64_t HttpTSPlayer::onResponseHeader(const string &status, const HttpClient::HttpHeader &headers) {
    //only 200/206 are acceptable status codes
    if (status != "200" && status != "206") {
        shutdown(SockException(Err_other, StrPrinter << "bad http status code:" + status));
        return 0;
    }
    //remember whether the server advertised an mpeg-ts payload
    const auto &content_type = const_cast<HttpClient::HttpHeader &>(headers)["Content-Type"];
    bool looks_like_ts = (content_type.find("video/mp2t") == 0 || content_type.find("video/mpeg") == 0);
    if (looks_like_ts) {
        _is_ts_content = true;
    }
    //content length unknown: read until the connection closes
    return -1;
}
//Http body chunk received; optionally re-split into complete ts packets.
void HttpTSPlayer::onResponseBody(const char *buf, int64_t size, int64_t recvedSize, int64_t totalSize) {
    if (size <= 0) {
        //defensive: an empty chunk carries no data, and reading buf[0] below
        //would be out-of-bounds if this were the first callback
        return;
    }
    if (recvedSize == size) {
        //recvedSize includes this chunk, so this is the first chunk of the body
        if (buf[0] == TS_SYNC_BYTE) {
            //starts with the ts sync byte: looks like a ts stream
            _is_first_packet_ts = true;
        } else {
            WarnL << "可能不是http-ts流";
        }
    }
    if (_split_ts) {
        //re-split the byte stream into complete ts packets via TSSegment
        _segment.input(buf, size);
    } else {
        //forward the raw bytes as-is
        onPacket(buf, size);
    }
}
void HttpTSPlayer::onResponseCompleted() {
    //entire body received: close the connection, reporting success
    shutdown(SockException(Err_success, "play completed"));
}
void HttpTSPlayer::onDisconnect(const SockException &ex) {
    //fire the user disconnect callback at most once
    if (!_on_disconnect) {
        return;
    }
    _on_disconnect(ex);
    _on_disconnect = nullptr;
}
void HttpTSPlayer::onPacket(const char *data, uint64_t len) {
    //deliver one ts packet (or raw chunk) to the registered consumer
    if (!_on_segment) {
        return;
    }
    _on_segment(data, len);
}
void HttpTSPlayer::setOnDisconnect(const HttpTSPlayer::onShutdown &cb) {
    //callback fired once when the connection closes (see onDisconnect)
    _on_disconnect = cb;
}
void HttpTSPlayer::setOnPacket(const TSSegment::onSegment &cb) {
    //callback receiving ts packets (or raw chunks when split_ts is false)
    _on_segment = cb;
}
}//namespace mediakit
\ No newline at end of file
/*
* Copyright (c) 2020 The ZLMediaKit project authors. All Rights Reserved.
*
* This file is part of ZLMediaKit(https://github.com/xiongziliang/ZLMediaKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#ifndef HTTP_HTTPTSPLAYER_H
#define HTTP_HTTPTSPLAYER_H
#include "Http/HttpDownloader.h"
#include "Player/MediaPlayer.h"
#include "Rtp/TSDecoder.h"
using namespace toolkit;
namespace mediakit {
//http-ts播发器,未实现ts解复用
//http-ts player; does not demux the ts stream itself
class HttpTSPlayer : public HttpClientImp{
public:
    typedef function<void(const SockException &)> onShutdown;
    typedef std::shared_ptr<HttpTSPlayer> Ptr;

    HttpTSPlayer(const EventPoller::Ptr &poller = nullptr, bool split_ts = true);
    ~HttpTSPlayer() override ;

    //set the callback fired when the connection closes (error or success)
    void setOnDisconnect(const onShutdown &cb);
    //set the callback that receives ts packets
    void setOnPacket(const TSSegment::onSegment &cb);

protected:
    ///HttpClient override///
    int64_t onResponseHeader(const string &status,const HttpHeader &headers) override;
    void onResponseBody(const char *buf,int64_t size,int64_t recvedSize,int64_t totalSize) override;
    void onResponseCompleted() override;
    void onDisconnect(const SockException &ex) override ;

    //called for each received ts packet (or raw chunk when _split_ts is false)
    virtual void onPacket(const char *data, uint64_t len);

private:
    //whether Content-Type advertised an mpeg-ts payload
    bool _is_ts_content = false;
    //whether the first received byte was the ts sync byte
    bool _is_first_packet_ts = false;
    //whether to re-split the http body on ts packet boundaries
    bool _split_ts;
    TSSegment _segment;
    onShutdown _on_disconnect;
    TSSegment::onSegment _on_segment;
};
}//namespace mediakit
#endif //HTTP_HTTPTSPLAYER_H
......@@ -94,6 +94,20 @@ public:
_onRecv = nullptr;
sendRequest(http_url,fTimeOutSec);
}
//Send a websocket CLOSE control frame to shut the session down gracefully.
void closeWsClient(){
    if(!_onRecv){
        //websocket handshake not completed yet; nothing to close
        return;
    }
    WebSocketHeader header;
    header._fin = true;
    header._reserved = 0;
    header._opcode = CLOSE;
    //client-to-server frames must be masked
    header._mask_flag = true;
    WebSocketSplitter::encode(header, nullptr);
}
protected:
//HttpClientImp override
......@@ -110,7 +124,8 @@ protected:
if(Sec_WebSocket_Accept == const_cast<HttpHeader &>(headers)["Sec-WebSocket-Accept"]){
//success
onWebSocketException(SockException());
return 0;
//后续全是websocket负载数据
return -1;
}
shutdown(SockException(Err_shutdown,StrPrinter << "Sec-WebSocket-Accept mismatch"));
return 0;
......@@ -125,6 +140,16 @@ protected:
*/
void onResponseCompleted() override {}
/**
* 接收websocket负载数据
*/
void onResponseBody(const char *buf,int64_t size,int64_t recvedSize,int64_t totalSize) override{
if(_onRecv){
//完成websocket握手后,拦截websocket数据并解析
_onRecv(buf, size);
}
};
//TcpClient override
/**
......@@ -168,20 +193,6 @@ protected:
}
/**
* tcp收到数据
* @param pBuf
*/
void onRecv(const Buffer::Ptr &pBuf) override{
if(_onRecv){
//完成websocket握手后,拦截websocket数据并解析
_onRecv(pBuf);
}else{
//websocket握手数据
HttpClientImp::onRecv(pBuf);
}
}
/**
* tcp连接断开
* @param ex
*/
......@@ -193,7 +204,7 @@ protected:
//WebSocketSplitter override
/**
* 收到一个webSocket数据包包头,后续将继续触发onWebSocketDecodePlayload回调
* 收到一个webSocket数据包包头,后续将继续触发onWebSocketDecodePayload回调
* @param header 数据包头
*/
void onWebSocketDecodeHeader(const WebSocketHeader &header) override{
......@@ -205,9 +216,9 @@ protected:
* @param header 数据包包头
* @param ptr 负载数据指针
* @param len 负载数据长度
* @param recved 已接收数据长度(包含本次数据长度),等于header._playload_len时则接受完毕
* @param recved 已接收数据长度(包含本次数据长度),等于header._payload_len时则接受完毕
*/
void onWebSocketDecodePlayload(const WebSocketHeader &header, const uint8_t *ptr, uint64_t len, uint64_t recved) override{
void onWebSocketDecodePayload(const WebSocketHeader &header, const uint8_t *ptr, uint64_t len, uint64_t recved) override{
_payload.append((char *)ptr,len);
}
......@@ -285,9 +296,9 @@ private:
//触发连接成功事件
_delegate.onConnect(ex);
//拦截websocket数据接收
_onRecv = [this](const Buffer::Ptr &pBuf){
_onRecv = [this](const char *data, int len){
//解析websocket数据包
this->WebSocketSplitter::decode((uint8_t*)pBuf->data(),pBuf->size());
this->WebSocketSplitter::decode((uint8_t *)data, len);
};
return;
}
......@@ -306,7 +317,7 @@ private:
private:
string _Sec_WebSocket_Key;
function<void(const Buffer::Ptr &pBuf)> _onRecv;
function<void(const char *data, int len)> _onRecv;
ClientTypeImp<ClientType,DataType> &_delegate;
string _payload;
};
......@@ -328,7 +339,9 @@ public:
WebSocketClient(ArgsType &&...args) : ClientTypeImp<ClientType,DataType>(std::forward<ArgsType>(args)...){
_wsClient.reset(new HttpWsClient<ClientType,DataType>(*this));
}
~WebSocketClient() override {}
~WebSocketClient() override {
_wsClient->closeWsClient();
}
/**
* 重载startConnect方法,
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论