How to output PCM data in MFT - ms-media-foundation

I have created an Ogg decoder in Media Foundation, and I decode the Ogg data to PCM in IMFTransform::ProcessOutput.
But I cannot play the PCM data. How do I play it?
This is my ProcessOutput code:
HRESULT OggDecoder::ProcessOutput(
DWORD dwFlags, DWORD cOutputBufferCount,
MFT_OUTPUT_DATA_BUFFER *pOutputSamples, // one per stream
DWORD *pdwStatus )
{
if (dwFlags != 0)
{
return E_INVALIDARG;
}
if (pOutputSamples == NULL || pdwStatus == NULL)
{
return E_POINTER;
}
// Must be exactly one output buffer.
if (cOutputBufferCount != 1)
{
return E_INVALIDARG;
}
// It must contain a sample.
if (pOutputSamples[0].pSample == NULL)
{
return E_INVALIDARG;
}
EnterCriticalSection(&m_critSec);
HRESULT hr = S_OK;
DWORD cbData = 0;
*pdwStatus = 0; // ProcessOutput is expected to clear the status flags
IMFMediaBuffer *pOutput = NULL;
// If we don't have any pending output, we need more input before
// we can generate any output.
if (!HasPendingOutput())
{
hr = MF_E_TRANSFORM_NEED_MORE_INPUT;
}
// Get the output buffer.
if (SUCCEEDED(hr))
{
hr = pOutputSamples[0].pSample->GetBufferByIndex(0, &pOutput);
}
if (SUCCEEDED(hr))
{
hr = pOutput->GetMaxLength(&cbData);
}
if (SUCCEEDED(hr))
{
BYTE* pPCM = NULL;
hr = pOutput->Lock(&pPCM, NULL, NULL); // was pOutputBuffer, which is never declared
if (SUCCEEDED(hr))
{
GetPCMData(&pPCM); // decode audio data here
pOutput->SetCurrentLength(nLength); // nLength, sampleTime and sampleDuration are member state
pOutputSamples[0].pSample->SetSampleTime(sampleTime);
pOutputSamples[0].pSample->SetSampleDuration(sampleDuration);
pOutput->Unlock();
}
}
SafeRelease(&pOutput);
LeaveCriticalSection(&m_critSec);
return hr;
}
Am I missing something, or what's wrong with this code?
Thanks.

If you use topoedit.exe for debugging, it automatically adds a Resampler DMO, which is a DMO that converts PCM to float format.
You can also write the player app yourself: create the topology by hand and add the Resampler DMO node to it.
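A minimal sketch (untested) of adding that node by hand, assuming you already hold the topology and the decoder and renderer nodes; CLSID_CResamplerMediaObject comes from wmcodecdsp.h:
#include <mfidl.h>
#include <wmcodecdsp.h> // CLSID_CResamplerMediaObject

HRESULT AddResamplerNode(IMFTopology *pTopology,
                         IMFTopologyNode *pDecoderNode,
                         IMFTopologyNode *pRendererNode)
{
    IMFTopologyNode *pResamplerNode = NULL;
    HRESULT hr = MFCreateTopologyNode(MF_TOPOLOGY_TRANSFORM_NODE, &pResamplerNode);
    if (SUCCEEDED(hr))
    {
        // Let the topology loader instantiate the DMO from its CLSID.
        hr = pResamplerNode->SetGUID(MF_TOPONODE_TRANSFORM_OBJECTID,
                                     CLSID_CResamplerMediaObject);
    }
    if (SUCCEEDED(hr))
    {
        hr = pTopology->AddNode(pResamplerNode);
    }
    if (SUCCEEDED(hr))
    {
        // decoder -> resampler -> renderer
        hr = pDecoderNode->ConnectOutput(0, pResamplerNode, 0);
    }
    if (SUCCEEDED(hr))
    {
        hr = pResamplerNode->ConnectOutput(0, pRendererNode, 0);
    }
    if (pResamplerNode)
    {
        pResamplerNode->Release();
    }
    return hr;
}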

Related

Is there any control property to fix video playback speed problem when using ffmpeg to decode in Qt platform?

I want to play a local video file in Qt, using ffmpeg to decode. Everything is OK except that the playback speed is twice normal.
The first thing I thought of is that a sampling frequency must be involved, but being new to ffmpeg I don't know how to fix the problem.
Below is my code for reading frames; can anyone tell me what's wrong with it?
void VideoThread::run()
{
m_pInFmtCtx = avformat_alloc_context(); //ini struct
char path[] = "d:/test.mp4";
// open specific file
if(avformat_open_input(&m_pInFmtCtx, path, NULL, NULL)) // was *path, which passes a single char
{
qDebug()<<"get rtsp failed";
return;
}
else
{
qDebug()<<"get rtsp success";
}
if(avformat_find_stream_info(m_pInFmtCtx, NULL) < 0)
{
qDebug()<<"could not find stream information";
return;
}
int nVideoIndex = -1;
for(int i = 0; i < m_pInFmtCtx->nb_streams; i++)
{
if(m_pInFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
nVideoIndex = i;
break;
}
}
if(nVideoIndex == -1)
{
qDebug()<<"could not find video stream";
return;
}
qDebug("---------------- File Information ---------------");
m_pCodecCtx = m_pInFmtCtx->streams[nVideoIndex]->codec;
m_pCodec = avcodec_find_decoder(m_pCodecCtx->codec_id);
if(!m_pCodec)
{
qDebug()<<"could not find codec";
return;
}
//start Decoder
if (avcodec_open2(m_pCodecCtx, m_pCodec, NULL) < 0) {
qDebug("Could not open codec.\n");
return;
}
//malloc space for stroring frame
m_pFrame = av_frame_alloc();
m_pFrameRGB = av_frame_alloc();
m_pOutBuf = (uint8_t*)av_malloc(avpicture_get_size(AV_PIX_FMT_RGB32, m_pCodecCtx->width, m_pCodecCtx->height));
avpicture_fill((AVPicture*)m_pFrameRGB, m_pOutBuf, AV_PIX_FMT_RGB32, m_pCodecCtx->width, m_pCodecCtx->height);
//for color switch,from YUV to RGB
struct SwsContext *pImgCtx = sws_getContext(m_pCodecCtx->width, m_pCodecCtx->height, m_pCodecCtx->pix_fmt,
m_pCodecCtx->width, m_pCodecCtx->height, AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);
int nSize = m_pCodecCtx->width * m_pCodecCtx->height;
m_pPacket = (AVPacket *)av_malloc(sizeof(AVPacket));
if(av_new_packet(m_pPacket, nSize) != 0)
{
qDebug()<<"new packet failed";
}
//isInterruptionRequested is a flag,determine whether the thread is over
// read each frame from specific video file
while (!isInterruptionRequested())
{
int nGotPic = 0;
if(av_read_frame(m_pInFmtCtx, m_pPacket) >= 0)
{
if(m_pPacket->stream_index == nVideoIndex)
{
//avcodec_decode_video2()transform from packet to frame
if(avcodec_decode_video2(m_pCodecCtx, m_pFrame, &nGotPic, m_pPacket) < 0)
{
qDebug()<<"decode failed";
return;
}
if(nGotPic)
{ // transform to RGB color
sws_scale(pImgCtx, (const uint8_t* const*)m_pFrame->data,
m_pFrame->linesize, 0, m_pCodecCtx->height, m_pFrameRGB->data,
m_pFrameRGB->linesize);
// save to QImage for later use (note: this allocates a new QImage every
// frame and never deletes it, so it leaks unless ownership is handed off)
QImage *pImage = new QImage((uchar*)m_pOutBuf, m_pCodecCtx->width, m_pCodecCtx->height, QImage::Format_RGB32);
}
}
}
av_free_packet(m_pPacket);
msleep(5);
}
exec();
}
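For what it's worth, one plausible cause of the doubled speed: the loop above paces itself only with msleep(5), so frames are delivered as fast as they decode rather than at the stream's frame rate. A sketch of PTS-based pacing (untested; lastPtsMs is an assumed new variable initialised to -1 before the while loop, everything else reuses the names above):
AVStream *pStream = m_pInFmtCtx->streams[nVideoIndex];
int64_t ptsMs = av_rescale_q(m_pPacket->pts, pStream->time_base,
                             av_make_q(1, 1000)); // PTS -> milliseconds
if (lastPtsMs >= 0 && ptsMs > lastPtsMs)
{
    msleep(ptsMs - lastPtsMs); // sleep the inter-frame gap (QThread::msleep)
}
lastPtsMs = ptsMs;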

Media Foundation Frames in Byte Format Run-Time

I use Media Foundation to capture live webcam video. Is it possible to get the captured frames as a byte stream at run time, and to write them as a stream of bits to a text file after each time cycle?
I'm not sure whether I can get the stream in byte format (without a container), or whether I can do that at run time.
It's not completely clear what you're asking. If you want to capture the raw frames from the webcam and save them to a file, then the answer is yes, that can be done. The Media Foundation SDK MFCaptureToFile sample does exactly that, although because it uses a SinkWriter you will have to specify a container file type such as mp4 when creating it.
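For reference, the SinkWriter creation step is roughly this (sketch only; capture.mp4 is an illustrative name, and MFStartup is assumed to have been called already):
#include <mfreadwrite.h> // IMFSinkWriter, MFCreateSinkWriterFromURL

IMFSinkWriter *pWriter = NULL;
// The container format is chosen from the output file's extension (.mp4 here).
HRESULT hr = MFCreateSinkWriterFromURL(L"capture.mp4", NULL, NULL, &pWriter);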
If you really do want to get the raw frames one by one then you need to dispense with the SinkWriter (or write a custom one). Below is a code snippet that shows getting samples from an IMFSourceReader and converting them into a byte array (and a few other things). You could write the byte array to a text file, although unless you do something like put a bitmap header on it, it won't be very useful. The IMFSourceReader and IMFMediaType instances all need to be set up correctly before ReadSample can be called, but hopefully this gives you a rough idea of where to look further.
HRESULT MFVideoSampler::GetSample(/* out */ array<Byte> ^% buffer)
{
if (_videoReader == NULL) {
return E_POINTER; // was -1, which is not a meaningful HRESULT
}
else {
IMFSample *videoSample = NULL;
DWORD streamIndex, flags;
LONGLONG llVideoTimeStamp;
// Initial read results in a null pSample??
CHECK_HR(_videoReader->ReadSample(
//MF_SOURCE_READER_ANY_STREAM, // Stream index.
MF_SOURCE_READER_FIRST_VIDEO_STREAM,
0, // Flags.
&streamIndex, // Receives the actual stream index.
&flags, // Receives status flags.
&llVideoTimeStamp, // Receives the time stamp.
&videoSample // Receives the sample or NULL.
), L"Error reading video sample.");
if (flags & MF_SOURCE_READERF_ENDOFSTREAM)
{
wprintf(L"\tEnd of stream\n");
}
if (flags & MF_SOURCE_READERF_NEWSTREAM)
{
wprintf(L"\tNew stream\n");
}
if (flags & MF_SOURCE_READERF_NATIVEMEDIATYPECHANGED)
{
wprintf(L"\tNative type changed\n");
}
if (flags & MF_SOURCE_READERF_CURRENTMEDIATYPECHANGED)
{
wprintf(L"\tCurrent type changed\n");
IMFMediaType *videoType = NULL;
CHECK_HR(_videoReader->GetCurrentMediaType(
(DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM,
&videoType), L"Error retrieving current media type from first video stream.");
Console::WriteLine(GetMediaTypeDescription(videoType));
// Get the frame dimensions and stride
UINT32 nWidth, nHeight;
MFGetAttributeSize(videoType, MF_MT_FRAME_SIZE, &nWidth, &nHeight);
_width = nWidth;
_height = nHeight;
//LONG lFrameStride;
//videoType->GetUINT32(MF_MT_DEFAULT_STRIDE, (UINT32*)&lFrameStride);
videoType->Release();
}
if (flags & MF_SOURCE_READERF_STREAMTICK)
{
wprintf(L"\tStream tick\n");
}
if (!videoSample)
{
printf("Failed to get video sample from MF.\n");
}
else
{
DWORD nCurrBufferCount = 0;
CHECK_HR(videoSample->GetBufferCount(&nCurrBufferCount), L"Failed to get the buffer count from the video sample.\n");
IMFMediaBuffer * pMediaBuffer;
CHECK_HR(videoSample->ConvertToContiguousBuffer(&pMediaBuffer), L"Failed to extract the video sample into a raw buffer.\n");
DWORD nCurrLen = 0;
CHECK_HR(pMediaBuffer->GetCurrentLength(&nCurrLen), L"Failed to get the length of the raw buffer holding the video sample.\n");
byte *imgBuff;
DWORD buffCurrLen = 0;
DWORD buffMaxLen = 0;
pMediaBuffer->Lock(&imgBuff, &buffMaxLen, &buffCurrLen);
if (Stride != -1 && Stride < 0) {
// Bitmap needs to be flipped.
int bmpSize = buffCurrLen; // ToDo: Don't assume RGB/BGR 24.
int absStride = Stride * -1;
byte *flipBuf = new byte[bmpSize];
for (int row = 0; row < _height; row++) {
for (int col = 0; col < absStride; col += 3) {
flipBuf[row * absStride + col] = imgBuff[((_height - row - 1) * absStride) + col];
flipBuf[row * absStride + col + 1] = imgBuff[((_height - row - 1) * absStride) + col + 1];
flipBuf[row * absStride + col + 2] = imgBuff[((_height - row - 1) * absStride) + col + 2];
}
}
buffer = gcnew array<Byte>(buffCurrLen);
Marshal::Copy((IntPtr)flipBuf, buffer, 0, buffCurrLen);
delete[] flipBuf; // was plain delete, which is undefined for memory from new[]
}
else {
buffer = gcnew array<Byte>(buffCurrLen);
Marshal::Copy((IntPtr)imgBuff, buffer, 0, buffCurrLen);
}
pMediaBuffer->Unlock();
pMediaBuffer->Release();
videoSample->Release();
return S_OK;
}
return S_OK; // no sample was produced this call (e.g. a stream tick)
}
}
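If you do want the dumped frames to be viewable, a sketch along these lines prepends a minimal BMP header to one frame (assumptions: bottom-up RGB/BGR 24 data of _width x _height, as in the ToDo note above; the helper and file names are illustrative):
#include <windows.h>
#include <stdio.h>

// Illustrative helper: wrap one raw 24-bit frame in a minimal BMP header.
void WriteFrameAsBmp(const char *path, const BYTE *data, DWORD len,
                     LONG width, LONG height)
{
    BITMAPFILEHEADER bfh = { 0 };
    BITMAPINFOHEADER bih = { 0 };
    bih.biSize = sizeof(bih);
    bih.biWidth = width;
    bih.biHeight = height;      // positive height = bottom-up DIB
    bih.biPlanes = 1;
    bih.biBitCount = 24;
    bih.biCompression = BI_RGB;
    bih.biSizeImage = len;
    bfh.bfType = 0x4D42;        // 'BM'
    bfh.bfOffBits = sizeof(bfh) + sizeof(bih);
    bfh.bfSize = bfh.bfOffBits + len;
    FILE *f = fopen(path, "wb");
    if (f)
    {
        fwrite(&bfh, sizeof(bfh), 1, f);
        fwrite(&bih, sizeof(bih), 1, f);
        fwrite(data, 1, len, f);
        fclose(f);
    }
}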

LwIP Netconn API + FreeRTOS TCP Client Buffer Issue

I've been trying to modify the TCP server example for lwIP on the STM32F4DISCOVERY board. I have to write a sender which does not necessarily have to reply to server responses. It can send data at a 100 ms period, for example.
Firstly, the example of TCP server is like this:
static void tcpecho_thread(void *arg)
{
struct netconn *conn, *newconn;
err_t err;
LWIP_UNUSED_ARG(arg);
/* Create a new connection identifier. */
conn = netconn_new(NETCONN_TCP);
if (conn!=NULL) {
/* Bind connection to well known port number 7. */
err = netconn_bind(conn, NULL, DEST_PORT);
if (err == ERR_OK) {
/* Tell connection to go into listening mode. */
netconn_listen(conn);
while (1) {
/* Grab new connection. */
newconn = netconn_accept(conn);
/* Process the new connection. */
if (newconn) {
struct netbuf *buf;
void *data;
u16_t len;
while ((buf = netconn_recv(newconn)) != NULL) {
do {
netbuf_data(buf, &data, &len);
//Incoming package
.....
//Check for data
if (DATA IS CORRECT)
{
//Reply
data = "OK";
len = 2;
netconn_write(newconn, data, len, NETCONN_COPY);
}
} while (netbuf_next(buf) >= 0);
netbuf_delete(buf);
}
/* Close connection and discard connection identifier. */
netconn_close(newconn);
netconn_delete(newconn);
}
}
} else {
printf(" can not bind TCP netconn");
}
} else {
printf("can not create TCP netconn");
}
}
I modified this code to obtain a client version, this is what I've got so far:
static void tcpecho_thread(void *arg)
{
struct netconn *xNetConn = NULL;
struct ip_addr local_ip;
struct ip_addr remote_ip;
int rc1, rc2;
struct netbuf *Buf = NULL; // renamed from Gonderilen_Buf to match its use below
void *b_data;
u16_t b_len;
IP4_ADDR( &local_ip, IP_ADDR0, IP_ADDR1, IP_ADDR2, IP_ADDR3 );
IP4_ADDR( &remote_ip, DEST_IP_ADDR0, DEST_IP_ADDR1, DEST_IP_ADDR2, DEST_IP_ADDR3 );
xNetConn = netconn_new ( NETCONN_TCP );
rc1 = netconn_bind ( xNetConn, &local_ip, DEST_PORT );
rc2 = netconn_connect ( xNetConn, &remote_ip, DEST_PORT );
b_data = "+24C"; // Data to be send
b_len = sizeof ( b_data );
while(1)
{
if ( rc1 == ERR_OK )
{
// If button pressed, send data "+24C" to server
if (GPIO_ReadInputDataBit (GPIOA, GPIO_Pin_0) == Bit_SET)
{
Buf = netbuf_new();
netbuf_alloc(Buf, 4); // 4 bytes of buffer
Buf->p->payload = "+24C";
Buf->p->len = 4;
netconn_write(xNetConn, Buf->p->payload, b_len, NETCONN_COPY);
vTaskDelay(100); // To see the result easily in Comm Operator
netbuf_delete(Buf);
}
}
if ( rc1 != ERR_OK || rc2 != ERR_OK )
{
netconn_delete ( xNetConn );
}
}
}
While the write operation works, netconn_write sends whatever is in its buffer; it doesn't care whether b_data is NULL or not. I've tested this by adding the line b_data = NULL;
So the resulting output in Comm Operator looks like this:
Rec:(02:47:27)+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C
However, I want it to work like this:
Rec:(02:47:22)+24C
Rec:(02:47:27)+24C
Rec:(02:57:12)+24C
Rec:(02:58:41)+24C
The desired write behaviour only happens when I wait around 8 seconds before pushing the button again.
Since the netconn_write function does not give access to its buffer, I'm not able to clear it. And netconn_send is only allowed for UDP connections.
I need some guidance to understand the problem and work out a solution for it.
Any help will be greatly appreciated.
It's just a matter of printing the result in the correct way: TCP is a byte stream, so consecutive messages run together unless you add a delimiter. You can try adding something like this before writing into the netbuf data structure, so that each message ends with a newline:
char buffer[20];
sprintf(buffer, "+24C\n");
Buf->p->payload = buffer;
Buf->p->len = strlen(buffer);
I see one or two problems in your code, depending on what you want it exactly to do. First of all, you're not sending b_data at all, but a constant string:
b_data = "+24C"; // Data to be send
and then
Buf->p->payload = "+24C";
Buf->p->len = 4;
netconn_write(xNetConn, Buf->p->payload, b_len, NETCONN_COPY);
b_data is not mentioned anywhere there; what is sent is the payload. Try Buf->p->payload = b_data; if that's what you want to achieve.
Second, if you want the +24C text to be sent only once when you push the button, you'll have to wait for the button to be released before continuing the loop, or it will send +24C continuously for as long as the button is held down. Something in this direction:
while (GPIO_ReadInputDataBit (GPIOA, GPIO_Pin_0) == Bit_SET) {
vTaskDelay(1);
}
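Putting both suggestions together, the button branch might look like this (a sketch, untested; note that netconn_write with NETCONN_COPY can take the data pointer directly, so no netbuf is needed just to send):
if (GPIO_ReadInputDataBit(GPIOA, GPIO_Pin_0) == Bit_SET)
{
    // Send the actual data, once per press.
    netconn_write(xNetConn, b_data, 4, NETCONN_COPY); // 4 = strlen("+24C")
    // Wait for the button to be released before looping again.
    while (GPIO_ReadInputDataBit(GPIOA, GPIO_Pin_0) == Bit_SET)
    {
        vTaskDelay(1);
    }
}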

Video Renderer hangs in Directshow

I have created a filter graph manually in a DirectShow experiment. Here, I have added a video source filter and a VMR-9 renderer. The renderer's video window does not move, minimize, or close until the video reaches end of file. If I render the source filter directly, this does not occur. I need a solution to this.
while(1)
{
IGraphBuilder *pGraph = NULL;
IMediaControl *pControl = NULL;
IMediaEvent *pEvent = NULL;
IBaseFilter *pInputFileFilter = NULL;
IBaseFilter *pVideoRenderer = NULL;
IPin *pFileOut = NULL, *pVidIn = NULL;
IVideoWindow *VidWindow=NULL;
string s=openfilename();
wstring ws;
ws.assign (s.begin (), s.end ());
// Initialize the COM library.
HRESULT hr = CoInitialize(NULL);
if (FAILED(hr))
{
printf("ERROR - Could not initialize COM library");
return 1;
}
// Create the filter graph manager and query for interfaces.
hr = CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
IID_IGraphBuilder, (void **)&pGraph);
if (FAILED(hr))
{
printf("ERROR - Could not create the Filter Graph Manager.");
return 1;
}
hr = pGraph->QueryInterface(IID_IMediaControl, (void **)&pControl);
hr = pGraph->QueryInterface(IID_IMediaEvent, (void **)&pEvent);
// And add the filter to the filter graph
// using the member function AddFilter.
hr = pGraph->AddSourceFilter(ws.c_str(), ws.c_str(), &pInputFileFilter);
if (SUCCEEDED(hr))
{
// Now create an instance of the video renderer
// and obtain a pointer to its IBaseFilter interface.
hr = CoCreateInstance(CLSID_VideoMixingRenderer9,NULL, CLSCTX_INPROC_SERVER, IID_IBaseFilter,
(void **)&pVideoRenderer);
if (SUCCEEDED(hr))
{
hr = pGraph->AddFilter(pVideoRenderer, L"Video Renderer");
//pVideoRenderer->QueryInterface(IID_IVideoWindow,(void**)&VidWindow);
if (SUCCEEDED(hr))
{
// Now we need to connect the output pin of the source
// to the input pin of the renderer.
// Obtain the output pin of the source filter.
// The local function GetPin does this.
pFileOut = GetPin(pInputFileFilter, PINDIR_OUTPUT);
if (pFileOut != NULL)
{ // Is the pin good?
// Obtain the input pin of the video renderer.
pVidIn = GetPin(pVideoRenderer, PINDIR_INPUT);
if (pVidIn != NULL)
{ // Is the pin good?
// Connect the pins together:
// We use the Filter Graph Manager's
// member function Connect,
// which uses Intelligent Connect.
// If this fails, DirectShow couldn't
// render the media file.
hr = pGraph->Connect(pFileOut, pVidIn);
}
}
}
}
}
if (SUCCEEDED(hr))
{
//VidWindow->put_FullScreenMode(OATRUE);
//VidWindow->put_Owner(NULL);
// Run the graph.
hr = pControl->Run();
if (SUCCEEDED(hr))
{
// Wait for completion.
long evCode;
pEvent->WaitForCompletion(INFINITE, &evCode);
// Note: Do not use INFINITE in a real application, because it
// can block indefinitely.
}
hr = pControl->Stop();
}
// Now release everything we instantiated--
// that is, if it got instantiated.
if(pFileOut)
{ // If it exists, non-NULL
pFileOut->Release(); // Then release it
}
if (pVidIn)
{
pVidIn->Release();
}
if (pInputFileFilter)
{
pInputFileFilter->Release();
}
if (pVideoRenderer)
{
pVideoRenderer->Release();
}
//VidWindow->Release();
pControl->Release();
pEvent->Release();
pGraph->Release();
CoUninitialize();
}
Look at this fragment in your code:
// Do not use INFINITE in a real application, because it
// can block indefinitely.
This is where you are expected to add a message loop and dispatch messages; that is what will bring some life back into your application. You can poll for completion, or register your window to receive a message when completion occurs.
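A minimal sketch (untested) of that change, replacing the blocking wait with a short-timeout poll plus a message pump so the video window stays responsive:
long evCode = 0;
HRESULT hrWait;
do
{
    // Keep the video window alive by dispatching its messages.
    MSG msg;
    while (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
    {
        TranslateMessage(&msg);
        DispatchMessage(&msg);
    }
    // Poll with a 10 ms timeout instead of INFINITE; WaitForCompletion
    // returns E_ABORT when the timeout expires before completion.
    hrWait = pEvent->WaitForCompletion(10, &evCode);
} while (hrWait == E_ABORT);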

How to get data from directshow filter output pin?

I have a DirectShow filter which takes an input, processes it, and delivers the result to its output pin.
I want to write this filter's output data to a file, and I want to do it in the filter class itself. So I want to get at the output pin's buffer data.
In short: how do I reach the final data on the output pin from inside the filter? How can I do it?
Note: the output pin is derived from CBaseOutputPin. This is an open-source filter; it "magically" :-) puts the right data on its output pin, and I cannot figure out how yet...
Update:
Here is the situation:
Media Source ----> GFilter ----> FileWriter
I have the source code of GFilter... I have no source code of FileWriter... What I want is to make GFilter write out its own data. I debugged GFilter to get some insight into how it transforms the data, but my attempt to write this data out produced wrong data... So for now I've decided to work out how to simply get the data at its output pin...
Update[2]
Somewhere in the filter's output pin, the filter writer passes the file writer's pin to an IStreamPtr variable... Everything seems to be written to a member variable m_pIStream, which is of type IStreamPtr.
HRESULT GFilterOutput::CompleteConnect(IPin *pReceivePin)
{
// make sure that this is the file writer, supporting
// IStream, or we will not be able to write out the metadata
// at stop time
// m_pIStream is IStreamPtr type
m_pIStream = pReceivePin;
if (m_pIStream == NULL)
{
return E_NOINTERFACE;
}
return CBaseOutputPin::CompleteConnect(pReceivePin);
}
...
HRESULT GFilterOutput::Replace(LONGLONG pos, const BYTE* pBuffer, long cBytes)
{
//OutputDebugStringA("DEBUG: Now at MuxOutput Replace");
// all media content is written when the graph is running,
// using IMemInputPin. On stop (during our stop, but after the
// file writer has stopped), we switch to IStream for the metadata.
// The in-memory index is updated after a successful call to this function, so
// any data not written on completion of Stop will not be in the index.
CAutoLock lock(&m_csWrite);
HRESULT hr = S_OK;
if (m_bUseIStream)
{
IStreamPtr pStream = GetConnected();
if (m_pIStream == NULL)
{
hr = E_NOINTERFACE;
} else {
LARGE_INTEGER liTo;
liTo.QuadPart = pos;
ULARGE_INTEGER uliUnused;
hr = m_pIStream->Seek(liTo, STREAM_SEEK_SET, &uliUnused);
if (SUCCEEDED(hr))
{
ULONG cActual;
hr = m_pIStream->Write(pBuffer, cBytes, &cActual);
if (SUCCEEDED(hr) && ((long)cActual != cBytes))
{
hr = E_FAIL;
}
}
}
} else {
// where the buffer boundaries lie is not important in this
// case, so break writes up into the buffers.
while (cBytes && (hr == S_OK))
{
IMediaSamplePtr pSample;
hr = GetDeliveryBuffer(&pSample, NULL, NULL, 0);
if (SUCCEEDED(hr))
{
long cThis = min(pSample->GetSize(), cBytes);
BYTE* pDest;
pSample->GetPointer(&pDest);
CopyMemory(pDest, pBuffer, cThis);
pSample->SetActualDataLength(cThis);
// time stamps indicate file position in bytes
LONGLONG tStart = pos;
LONGLONG tEnd = pos + cThis;
pSample->SetTime(&tStart, &tEnd);
hr = Deliver(pSample);
if (SUCCEEDED(hr))
{
pBuffer += cThis;
cBytes -= cThis;
pos += cThis;
}
}
}
}
return hr;
}
You have the full source code; step through it with a debugger until you reach the point where your filter calls IPin::Receive on the peer downstream filter, then update/override the code there and you have full control over writing the data into a file etc.
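For example, inside the IMemInputPin branch of Replace above you could dump each sample's bytes just before Deliver(pSample). A sketch (the file name and the bare fopen/fwrite are illustrative only):
// pSample and pDest are as in the Replace code above; by this point
// SetActualDataLength(cThis) has already been called on the sample.
FILE *fDump = fopen("dump.bin", "ab"); // append raw bytes
if (fDump)
{
    fwrite(pDest, 1, pSample->GetActualDataLength(), fDump);
    fclose(fDump);
}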
