當前位置:首頁 » 網頁前端 » webffmpeg
擴展閱讀
webinf下怎麼引入js 2023-08-31 21:54:13
堡壘機怎麼打開web 2023-08-31 21:54:11

webffmpeg

發布時間: 2023-07-28 07:47:34

『壹』 做跨平台app必須的會web嗎

是一定要會的
webapp可以通過互聯網上的第三方app開發平台實現,比如「應用之星」等網站;
跨平台app一般都要程序猿寫代碼開發,得有一定的預算請人才行,
要能成功編譯出官方Web源碼，需完整下載兩部分。一是Web核心代碼，大概30M；二是第三方支持庫，像BoringSSL、libvpx、ffmpeg等等。

『貳』 QT Web引擎支持rtsp流嗎

支持
qt客戶端實時播放rtsp音頻流demo並且無雜音
推流工具使用EasyDarwin
推流直接使用ffmpeg 推流到 EasyDarwin 伺服器,音頻流取自電腦拾音器,ffmepg指令為:
ffmpeg -f dshow -i audio="麥克風 (Realtek® Audio)" -codec:a aac -ac 2 -ar 16000 -f rtsp rtsp://10.1.3.170:554/3_a.sdp
至於怎麼推流自行網路呀



客戶端採用FFMPEG 取流，解析出PCM 音頻裸流，在一個線程中接收rtsp流並解析出音頻數據，具體代碼如下 PlayVoicePlayer.cpp：
#include "playvoiceplayer.h"

#include <QDebug>

// Construct the player thread; decoding does not begin until startPlay()
// is called, which sets the url and launches run() on this QThread.
PlayVoicePlayer::PlayVoicePlayer(QObject *parent)
    : QThread(parent) {}

// Remember the RTSP url, re-arm the "first audio frame" latch, and spin
// up the decode thread (QThread::start() invokes run() asynchronously).
void PlayVoicePlayer::startPlay(QString url)
{
    qDebug() << "Video2PCM::startPlay()";

    playUrl = url;
    unGetStream = true;
    start();
}

void PlayVoicePlayer::run()
{

qDebug() << "Video2PCM::run():"<<playUrl;

isStart = true;
AVFormatContext *pFormatCtx = NULL;
AVCodecContext *pCodecCtx = NULL;
AVCodec *pCodec = NULL;
AVPacket packet;
AVFrame *pAudioFrame = NULL;
uint8_t *buffer = NULL;
struct SwrContext *audio_convert_ctx = NULL;
int got_picture;
int audioIndex;
int out_buffer_size;

av_register_all();
if (avformat_open_input(&pFormatCtx, playUrl.toStdString().data(), NULL, NULL) != 0)
{
emit getPcmStreamStop();
qDebug()<< " Video2PCM Couldn't open an input stream.";
return;
}
pFormatCtx->probesize = 5 *1024; //使用1000*1024 延時大概是2秒開始開始播放1920*1080使用這個參數暫時沒發新崩潰的情況
pFormatCtx->max_analyze_ration = 1 * AV_TIME_BASE;

if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
{
emit getPcmStreamStop();
qDebug()<< "Video2PCM Couldn't find stream information.";
return;
}
audioIndex = -1;
for (int i = 0; i < pFormatCtx->nb_streams; i++)
{
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
{
audioIndex = i;
break;
}
}

if (audioIndex == -1)
{
emit getPcmStreamStop();
qDebug()<< "Video2PCM Couldn't find a audio stream.";
return;
}

pCodecCtx = pFormatCtx->streams[audioIndex]->codec;
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) printf("Codec not found.\n");
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
{
emit getPcmStreamStop();
qDebug()<< "Video2PCM Could not open codec.";
return;
}

pAudioFrame = av_frame_alloc();
if (pAudioFrame == NULL)
{
emit getPcmStreamStop();
qDebug()<< "Video2PCM Could not alloc AVFrame";
return;
}

//音頻輸出參數
uint64_t out_channel_layout = AV_CH_LAYOUT_STEREO;//聲道格式
AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S32;//采樣格式
int out_nb_samples = pCodecCtx->frame_size;//nb_samples: AAC-1024 MP3-1152
// int out_sample_rate = 44100;//采樣率
int out_sample_rate = 16000;//采樣率
int out_nb_channels = av_get_channel_layout_nb_channels(out_channel_layout);//根據聲道格式返回聲道個數
out_buffer_size = av_samples_get_buffer_size(NULL, out_nb_channels, out_nb_samples, out_sample_fmt, 1);

buffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE);

audio_convert_ctx = swr_alloc();
if (audio_convert_ctx == NULL)
{
{
emit getPcmStreamStop();
qDebug()<< " Video2PCM Could not allocate SwrContext";
return;
}
}

swr_alloc_set_opts(audio_convert_ctx, out_channel_layout, out_sample_fmt,out_sample_rate,
pCodecCtx->channel_layout, pCodecCtx->sample_fmt, pCodecCtx->sample_rate, 0, NULL);

swr_init(audio_convert_ctx);

int index = 0;//計數器
while (isStart)
{
if(av_read_frame(pFormatCtx, &packet)<0)
{
emit getPcmStreamStop();
break;
}
if (packet.stream_index == audioIndex) {
if (avcodec_decode_audio4(pCodecCtx, pAudioFrame, &got_picture, &packet) < 0) {
qDebug() <<("Error in decoding audio frame.\n");
emit getPcmStreamStop();
break;
}
if (got_picture) {

// int dst_nb_samples = av_rescale_rnd(swr_get_delay(audio_convert_ctx, pAudioFrame->sample_rate) + pAudioFrame->nb_samples, pAudioFrame->sample_rate, pAudioFrame->sample_rate, AVRounding(1));
swr_convert(audio_convert_ctx, &buffer, MAX_AUDIO_FRAME_SIZE, (const uint8_t **)pAudioFrame->data, pAudioFrame->nb_samples);
if(unGetStream == true)
{
qDebug() << "Video2PCM unGetStream";
unGetStream =false;
emit getAudioStream();
}
// printf("index:%5d\t pts:%lld\t packet size:%d\n", index, packet.pts, packet.size);
//Write PCM
// fwrite(buffer, 1, out_buffer_size, fp_pcm);
emit decodePCM(packet.pts, QByteArray((char*)buffer, out_buffer_size));

index++;
}
}

av_free_packet(&packet);
}

qDebug() << "Video2PCM close1";
swr_free(&audio_convert_ctx);
av_free(buffer);
av_frame_free(&pAudioFrame);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
isStart= false;

對應的PlayVoicePlayer.h文件如下:

#ifndef PLAYVOICEPLAYER_H
#define PLAYVOICEPLAYER_H

#include <QObject>
#include <QThread>
#ifdef _WINDOWS
extern "C"
{
#include "libavcodec\avcodec.h"
#include "libavformat\avformat.h"
#include "libswresample\swresample.h"
};
#else
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswresample/swresample.h"
};
#endif

#include <QAudioFormat>
#include <QAudioOutput>

#define MAX_AUDIO_FRAME_SIZE 192000

// Worker thread that pulls an RTSP stream with FFmpeg, decodes its audio
// track and emits resampled PCM buffers (implementation in the .cpp file).
class PlayVoicePlayer : public QThread
{
Q_OBJECT
public:
explicit PlayVoicePlayer(QObject *parent = nullptr);
// Store the stream url, reset state and launch the decode thread (run()).
void startPlay(QString url);
private:
bool isStart = true;  // decode-loop flag; cleared when run() exits
QString playUrl;      // RTSP url handed to avformat_open_input()
bool unGetStream;     // true until the first audio frame has been decoded

signals:
void getPcmStreamStop();  // emitted on open/decode failure or end of stream
void getAudioStream();    // emitted once, when the first audio frame arrives
void decodePCM(qint64 pts, const QByteArray& pcm);  // one resampled PCM buffer per decoded frame
protected:
// QThread entry point: the FFmpeg read/decode/resample loop.
void run();
};

#endif // PLAYVOICEPLAYER_H

『叄』 有沒有認識FFmpeg Webui這套程序

為什麼要用帶ui的?ffmpeg本來就可以用命令行來本地轉視頻格式,而且也免費,這種ui是給普通用戶手動轉碼的,一般不需要。