I have created a filter graph manually in a DirectShow experiment. I added a video source filter and a VMR-9 renderer. The renderer's video window cannot be moved, minimized, or closed until the video reaches end of file. If I render the source filter directly, this does not occur. I need a solution to this.
while (1)
{
    IGraphBuilder *pGraph = NULL;
    IMediaControl *pControl = NULL;
    IMediaEvent *pEvent = NULL;
    IBaseFilter *pInputFileFilter = NULL;
    IBaseFilter *pVideoRenderer = NULL;
    IPin *pFileOut = NULL, *pVidIn = NULL;
    IVideoWindow *VidWindow = NULL;

    string s = openfilename();
    wstring ws;
    ws.assign(s.begin(), s.end());

    // Initialize the COM library.
    HRESULT hr = CoInitialize(NULL);
    if (FAILED(hr))
    {
        printf("ERROR - Could not initialize COM library");
        return 1;
    }

    // Create the filter graph manager and query for interfaces.
    hr = CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
                          IID_IGraphBuilder, (void **)&pGraph);
    if (FAILED(hr))
    {
        printf("ERROR - Could not create the Filter Graph Manager.");
        return 1;
    }

    hr = pGraph->QueryInterface(IID_IMediaControl, (void **)&pControl);
    hr = pGraph->QueryInterface(IID_IMediaEvent, (void **)&pEvent);

    // And add the source filter to the filter graph
    // using the member function AddSourceFilter.
    hr = pGraph->AddSourceFilter(ws.c_str(), ws.c_str(), &pInputFileFilter);
    if (SUCCEEDED(hr))
    {
        // Now create an instance of the video renderer
        // and obtain a pointer to its IBaseFilter interface.
        hr = CoCreateInstance(CLSID_VideoMixingRenderer9, NULL, CLSCTX_INPROC_SERVER,
                              IID_IBaseFilter, (void **)&pVideoRenderer);
        if (SUCCEEDED(hr))
        {
            hr = pGraph->AddFilter(pVideoRenderer, L"Video Renderer");
            //pVideoRenderer->QueryInterface(IID_IVideoWindow, (void**)&VidWindow);
            if (SUCCEEDED(hr))
            {
                // Now we need to connect the output pin of the source
                // to the input pin of the renderer.
                // Obtain the output pin of the source filter.
                // The local function GetPin does this.
                pFileOut = GetPin(pInputFileFilter, PINDIR_OUTPUT);
                if (pFileOut != NULL) // Is the pin good?
                {
                    // Obtain the input pin of the video renderer.
                    pVidIn = GetPin(pVideoRenderer, PINDIR_INPUT);
                    if (pVidIn != NULL) // Is the pin good?
                    {
                        // Connect the pins together. We use the Filter Graph
                        // Manager's member function Connect, which uses
                        // Intelligent Connect. If this fails, DirectShow
                        // couldn't render the media file.
                        hr = pGraph->Connect(pFileOut, pVidIn);
                    }
                }
            }
        }
    }

    if (SUCCEEDED(hr))
    {
        //VidWindow->put_FullScreenMode(OATRUE);
        //VidWindow->put_Owner(NULL);

        // Run the graph.
        hr = pControl->Run();
        if (SUCCEEDED(hr))
        {
            // Wait for completion.
            long evCode;
            pEvent->WaitForCompletion(INFINITE, &evCode);
            // Note: Do not use INFINITE in a real application, because it
            // can block indefinitely.
        }
        hr = pControl->Stop();
    }

    // Now release everything we instantiated--
    // that is, if it got instantiated.
    if (pFileOut)             // If it exists (non-NULL),
    {
        pFileOut->Release();  // then release it.
    }
    if (pVidIn)
    {
        pVidIn->Release();
    }
    if (pInputFileFilter)
    {
        pInputFileFilter->Release();
    }
    if (pVideoRenderer)
    {
        pVideoRenderer->Release();
    }
    //VidWindow->Release();
    pControl->Release();
    pEvent->Release();
    pGraph->Release();
    CoUninitialize();
}
Look at this fragment in your code:
// Do not use INFINITE in a real application, because it
// can block indefinitely.
This is where you are expected to run a message loop and dispatch messages; that is what will bring the awaited life into your application. You can either poll for completion with a short timeout, or register your window to receive a notification message when completion occurs (IMediaEventEx::SetNotifyWindow).
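For example, a minimal sketch of the polling variant, reusing the pEvent variable from the code above (IMediaEvent::WaitForCompletion returns E_ABORT when the timeout expires before playback completes):

long evCode = 0;
HRESULT hrWait;
do
{
    // Pump pending window messages so the video window can move,
    // minimize and close while the graph is running.
    MSG msg;
    while (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
    {
        TranslateMessage(&msg);
        DispatchMessage(&msg);
    }
    // Poll with a short timeout instead of INFINITE.
    hrWait = pEvent->WaitForCompletion(50, &evCode);
} while (hrWait == E_ABORT);

The notification variant instead calls IMediaEventEx::SetNotifyWindow with a user-defined message, and you handle graph events such as EC_COMPLETE in your window procedure.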
I've developed a video player based on Qt and QtGstreamer. It is used to play live streams (RTSP). I have to add the possibility for the user to take snapshots while playing a live stream, without disturbing the video playback.
Here is the graph of the pipeline I've made:
                                     +--> queue --> autovideosink
uridecodebin --> videoflip --> tee --+
      |                              +--> queue --> videoconvert --> pngenc --> filesink
      |
      +--> audioconvert --> autoaudiosink
I use the pad-added signal from uridecodebin to add and link my elements to the pipeline dynamically, depending on the received caps.
void Player::onPadAdded(const QGst::PadPtr &pad)
{
    QGst::CapsPtr caps = pad->currentCaps();
    if (caps->toString().startsWith("video/x-raw")) {
        qDebug("Received 'video/x-raw' caps");
        handleNewVideoPad(pad);
    }
    else if (caps->toString().startsWith("audio/x-raw")) {
        qDebug("Received 'audio/x-raw' caps");
        if (!m_audioEnabled) {
            qDebug("Audio is disabled in the player. Ignoring...");
            return;
        }
        handleNewAudioPad(pad);
    }
    else {
        qWarning("Unsupported caps, aborting...!");
        return;
    }
}
[...]
void Player::handleNewVideoPad(QGst::PadPtr pad)
{
    m_player->videoTeeVideoSrcPad = m_player->videoTee->getRequestPad("src_%u");

    // Add video elements
    m_player->pipeline->add(m_player->videoFlip);
    m_player->pipeline->add(m_player->videoTee);
    m_player->pipeline->add(m_player->videoQueue);
    m_player->pipeline->add(m_player->videoSink);

    // Add snap elements
    m_player->pipeline->add(m_player->snapQueue);
    m_player->pipeline->add(m_player->snapConverter);
    m_player->pipeline->add(m_player->snapEncoder);
    m_player->pipeline->add(m_player->snapSink);

    // Link video elements
    m_player->videoFlip->link(m_player->videoTee);
    m_player->videoQueue->link(m_player->videoSink);

    // Link snap elements
    m_player->snapQueue->link(m_player->snapConverter);
    m_player->snapConverter->link(m_player->snapEncoder);
    m_player->snapEncoder->link(m_player->snapSink);

    // Lock snap elements
    m_player->snapQueue->setStateLocked(true);
    m_player->snapConverter->setStateLocked(true);
    m_player->snapEncoder->setStateLocked(true);
    m_player->snapSink->setStateLocked(true);

    m_player->videoFlip->setState(QGst::StatePlaying);
    m_player->videoTee->setState(QGst::StatePlaying);
    m_player->videoQueue->setState(QGst::StatePlaying);
    m_player->videoSink->setState(QGst::StatePlaying);

    // Link pads
    m_player->videoTeeVideoSrcPad->link(m_player->videoQueue->getStaticPad("sink"));
    pad->link(m_player->videoSinkPad);

    m_player->videoLinked = true;
}
The method to take a snapshot:
void Player::takeSnapshot()
{
    QDateTime dateTime = QDateTime::currentDateTime();
    QString snapLocation = QString("/%1/snap_%2.png").arg(m_snapDir).arg(dateTime.toString(Qt::ISODate));

    m_player->inSnapshotCaputre = true;
    if (m_player->videoTeeSnapSrcPad) {
        m_player->videoTee->releaseRequestPad(m_player->videoTeeSnapSrcPad);
        m_player->videoTeeSnapSrcPad.clear();
    }
    m_player->videoTeeSnapSrcPad = m_player->videoTee->getRequestPad("src_%u");

    // Stop the snapshot branch
    m_player->snapQueue->setState(QGst::StateNull);
    m_player->snapConverter->setState(QGst::StateNull);
    m_player->snapEncoder->setState(QGst::StateNull);
    m_player->snapSink->setState(QGst::StateNull);

    // Link Tee src pad to snap queue sink pad
    m_player->videoTeeSnapSrcPad->link(m_player->snapQueue->getStaticPad("sink"));

    // Set the snapshot location property
    m_player->snapSink->setProperty("location", snapLocation);

    // Unlock snapshot branch
    m_player->snapQueue->setStateLocked(false);
    m_player->snapConverter->setStateLocked(false);
    m_player->snapEncoder->setStateLocked(false);
    m_player->snapSink->setStateLocked(false);

    m_player->videoTeeSnapSrcPad->setActive(true);

    // Sync snapshot branch state with parent
    m_player->snapQueue->syncStateWithParent();
    m_player->snapConverter->syncStateWithParent();
    m_player->snapEncoder->syncStateWithParent();
    m_player->snapSink->syncStateWithParent();
}
The bus message callback:
void Player::onBusMessage(const QGst::MessagePtr &message)
{
    QGst::ElementPtr source = message->source().staticCast<QGst::Element>();
    switch (message->type()) {
    case QGst::MessageEos: { // End of stream. We reached the end of the file.
        qDebug("Message End Of Stream");
        if (m_player->inSnapshotCaputre) {
            blockSignals(true);
            pause();
            play();
            blockSignals(false);
            m_player->inSnapshotCaputre = false;
        }
        else {
            m_eos = true;
            stop();
        }
        break;
    }
    [...]
}
The problem is:
When I set the snapshot property of the pngenc element to true, I receive an EOS event, which stops my pipeline, so I need to restart it. That freezes the video playback for about half a second, which is not acceptable in my case.
When I set the snapshot property of the pngenc element to false, there are no pipeline perturbations, but my PNG file keeps growing until I call the Player::takeSnapshot() method again.
Where am I wrong? Is there a better way to do it?
I've tried unsuccessfully to create a QGst::Bin element for my snapshot branch. What about a pad probe?
Thanks in advance
You can read the last-sample property on any sink, e.g. your video sink. It contains a GstSample, whose buffer holds the very latest video frame. You can take that as a snapshot and, e.g. with gst_video_convert_sample() or its async variant, convert it to PNG/JPEG/whatever.
See https://gstreamer.freedesktop.org/data/doc/gstreamer/head/gstreamer-libs/html/GstBaseSink.html#GstBaseSink--last-sample and https://gstreamer.freedesktop.org/data/doc/gstreamer/head/gst-plugins-base-libs/html/gst-plugins-base-libs-gstvideo.html#gst-video-convert-sample
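For reference, a minimal sketch of the same idea with the plain GStreamer C API (assuming sink points at your video sink; gst_video_convert_sample() accepts image caps such as image/png directly):

GstSample *sample = NULL;
g_object_get(sink, "last-sample", &sample, NULL);
if (sample != NULL) {
    GError *err = NULL;
    GstCaps *png_caps = gst_caps_from_string("image/png");
    GstSample *converted =
        gst_video_convert_sample(sample, png_caps, GST_SECOND, &err);
    if (converted != NULL) {
        // The converted buffer now contains a complete PNG image.
        GstBuffer *buf = gst_sample_get_buffer(converted);
        GstMapInfo map;
        if (gst_buffer_map(buf, &map, GST_MAP_READ)) {
            g_file_set_contents("/tmp/snapshot.png",
                                (const gchar *)map.data, map.size, NULL);
            gst_buffer_unmap(buf, &map);
        }
        gst_sample_unref(converted);
    } else {
        g_clear_error(&err);
    }
    gst_caps_unref(png_caps);
    gst_sample_unref(sample);
}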
Alternatively, you would have to shut down the filesink snapshot branch after the first frame, for example by using a pad probe to detect when the first frame has passed and then injecting an EOS event to prevent further PNG frames from being appended to the same file.
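A possible shape for that alternative, sketched with the plain GStreamer C API (the snap_queue_sink_pad variable and the first_seen flag are placeholders, not code from the question):

static GstPadProbeReturn
one_frame_probe(GstPad *pad, GstPadProbeInfo *info, gpointer user_data)
{
    gboolean *first_seen = (gboolean *)user_data;
    if (!*first_seen) {
        *first_seen = TRUE;  // let the first frame reach pngenc
        return GST_PAD_PROBE_OK;
    }
    // From the second buffer on: stop probing, finalize the file with EOS
    // and drop the buffer so nothing more is appended to the PNG.
    gst_pad_remove_probe(pad, GST_PAD_PROBE_INFO_ID(info));
    gst_pad_send_event(pad, gst_event_new_eos());
    return GST_PAD_PROBE_DROP;
}

// Installed on the snapshot queue's sink pad before unblocking the branch
// (first_seen must be reset to FALSE before every snapshot):
gst_pad_add_probe(snap_queue_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
                  one_frame_probe, &first_seen, NULL);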
Thanks to @sebastian-droge's answer, I found the solution, using gst_video_convert_sample() and the last-sample property of my video sink.
The solution I've implemented is:
void Player::takeSnapshot()
{
    QDateTime currentDate = QDateTime::currentDateTime();
    QString location = QString("%1/snap_%2.png").arg(QDir::homePath()).arg(currentDate.toString(Qt::ISODate));
    QImage snapShot;
    QImage::Format snapFormat;
    QGlib::Value val = m_videoSink->property("last-sample");
    GstSample *videoSample = (GstSample *)g_value_get_boxed(val);
    QGst::SamplePtr sample = QGst::SamplePtr::wrap(videoSample);
    QGst::SamplePtr convertedSample;
    QGst::BufferPtr buffer;
    QGst::CapsPtr caps = sample->caps();
    QGst::MapInfo mapInfo;
    GError *err = NULL;
    GstCaps *capsTo = NULL;
    const QGst::StructurePtr structure = caps->internalStructure(0);
    int width, height;

    width = structure.data()->value("width").get<int>();
    height = structure.data()->value("height").get<int>();
    qDebug() << "Sample caps:" << structure.data()->toString();

    /*
     * { QImage::Format_RGBX8888, GST_VIDEO_FORMAT_RGBx  },
     * { QImage::Format_RGBA8888, GST_VIDEO_FORMAT_RGBA  },
     * { QImage::Format_RGB888  , GST_VIDEO_FORMAT_RGB   },
     * { QImage::Format_RGB16   , GST_VIDEO_FORMAT_RGB16 }
     */
    snapFormat = QImage::Format_RGB888;
    capsTo = gst_caps_new_simple("video/x-raw",
                                 "format", G_TYPE_STRING, "RGB",
                                 "width", G_TYPE_INT, width,
                                 "height", G_TYPE_INT, height,
                                 NULL);

    convertedSample = QGst::SamplePtr::wrap(gst_video_convert_sample(videoSample, capsTo, GST_SECOND, &err));
    if (convertedSample.isNull()) {
        qWarning() << "gst_video_convert_sample Failed:" << err->message;
    }
    else {
        qDebug() << "Converted sample caps:" << convertedSample->caps()->toString();
        buffer = convertedSample->buffer();
        buffer->map(mapInfo, QGst::MapRead);
        snapShot = QImage((const uchar *)mapInfo.data(),
                          width,
                          height,
                          snapFormat);
        qDebug() << "Saving snap to" << location;
        snapShot.save(location);
        buffer->unmap(mapInfo);
    }

    val.clear();
    sample.clear();
    convertedSample.clear();
    buffer.clear();
    caps.clear();
    g_clear_error(&err);
    if (capsTo)
        gst_caps_unref(capsTo);
}
I've created a simple test application which implements this solution. The code is available on my GitHub.
I've been trying to modify the TCP server example shipped with LwIP on the STM32F4DISCOVERY board. I have to write a sender which does not necessarily have to reply to server responses; it can, for example, send data every 100 ms.
First, the TCP server example looks like this:
static void tcpecho_thread(void *arg)
{
    struct netconn *conn, *newconn;
    err_t err;
    LWIP_UNUSED_ARG(arg);

    /* Create a new connection identifier. */
    conn = netconn_new(NETCONN_TCP);
    if (conn != NULL) {
        /* Bind connection to well known port number 7. */
        err = netconn_bind(conn, NULL, DEST_PORT);
        if (err == ERR_OK) {
            /* Tell connection to go into listening mode. */
            netconn_listen(conn);
            while (1) {
                /* Grab new connection. */
                newconn = netconn_accept(conn);
                /* Process the new connection. */
                if (newconn) {
                    struct netbuf *buf;
                    void *data;
                    u16_t len;
                    while ((buf = netconn_recv(newconn)) != NULL) {
                        do {
                            netbuf_data(buf, &data, &len);
                            // Incoming package
                            .....
                            // Check for data
                            if (DATA IS CORRECT)
                            {
                                // Reply
                                data = "OK";
                                len = 2;
                                netconn_write(newconn, data, len, NETCONN_COPY);
                            }
                        } while (netbuf_next(buf) >= 0);
                        netbuf_delete(buf);
                    }
                    /* Close connection and discard connection identifier. */
                    netconn_close(newconn);
                    netconn_delete(newconn);
                }
            }
        } else {
            printf(" can not bind TCP netconn");
        }
    } else {
        printf("can not create TCP netconn");
    }
}
I modified this code to obtain a client version; this is what I've got so far:
static void tcpecho_thread(void *arg)
{
    struct netconn *xNetConn = NULL;
    struct ip_addr local_ip;
    struct ip_addr remote_ip;
    int rc1, rc2;
    struct netbuf *Buf = NULL;
    void *b_data;
    u16_t b_len;

    IP4_ADDR(&local_ip, IP_ADDR0, IP_ADDR1, IP_ADDR2, IP_ADDR3);
    IP4_ADDR(&remote_ip, DEST_IP_ADDR0, DEST_IP_ADDR1, DEST_IP_ADDR2, DEST_IP_ADDR3);

    xNetConn = netconn_new(NETCONN_TCP);
    rc1 = netconn_bind(xNetConn, &local_ip, DEST_PORT);
    rc2 = netconn_connect(xNetConn, &remote_ip, DEST_PORT);

    b_data = "+24C"; // Data to be sent
    b_len = sizeof(b_data);

    while (1)
    {
        if (rc1 == ERR_OK)
        {
            // If button pressed, send data "+24C" to server
            if (GPIO_ReadInputDataBit(GPIOA, GPIO_Pin_0) == Bit_SET)
            {
                Buf = netbuf_new();
                netbuf_alloc(Buf, 4); // 4 bytes of buffer
                Buf->p->payload = "+24C";
                Buf->p->len = 4;
                netconn_write(xNetConn, Buf->p->payload, b_len, NETCONN_COPY);
                vTaskDelay(100); // To see the result easily in Comm Operator
                netbuf_delete(Buf);
            }
        }

        if (rc1 != ERR_OK || rc2 != ERR_OK)
        {
            netconn_delete(xNetConn);
        }
    }
}
While the writing operation works, netconn_write sends whatever is in its buffer; it doesn't care whether b_data is NULL or not. I've tested this by adding the line b_data = NULL;
So the resulting output in Comm Operator is like this:
Rec:(02:47:27)+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C+24C
However, I want it to work like this:
Rec:(02:47:22)+24C
Rec:(02:47:27)+24C
Rec:(02:57:12)+24C
Rec:(02:58:41)+24C
The desired write behaviour only happens when I wait around 8 seconds before pushing the button again.
Since the netconn_write function does not give me access to its buffer, I'm not able to clear it. And netconn_send is only allowed for UDP connections.
I need some guidance to understand the problem and to come up with a solution for it.
Any help will be greatly appreciated.
It's just a matter of printing the result in the correct way.
You can try adding this piece of code before writing into the netbuf data structure:
char buffer[20];
sprintf(buffer,"24+ \n");
Buf->p->payload = "+24C";
I see one or two problems in your code, depending on exactly what you want it to do. First of all, you're not sending b_data at all, but a constant string:
b_data = "+24C"; // Data to be sent
and then
Buf->p->payload = "+24C";
Buf->p->len = 4;
netconn_write(xNetConn, Buf->p->payload, b_len, NETCONN_COPY);
b_data is not used anywhere there; what is sent is the payload. Try Buf->p->payload = b_data; if that's what you want to achieve.
Second, if you want the +24C text to be sent only once when you push the button, you'll have to add a loop that waits for the button to be released before continuing; otherwise it will keep sending +24C for as long as the button is held down. Something in this direction:
while (GPIO_ReadInputDataBit(GPIOA, GPIO_Pin_0) == Bit_SET) {
    vTaskDelay(1);
}
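Putting both points together, the send block could look roughly like this (a sketch assuming you want exactly one message per press; note also that sizeof(b_data) is the size of a pointer, not the string length, and only happens to equal 4 on this 32-bit target):

if (GPIO_ReadInputDataBit(GPIOA, GPIO_Pin_0) == Bit_SET)
{
    // Send the actual data with its real length (4 bytes).
    netconn_write(xNetConn, b_data, 4, NETCONN_COPY);

    // Wait for the button to be released, so one press sends one message.
    while (GPIO_ReadInputDataBit(GPIOA, GPIO_Pin_0) == Bit_SET)
    {
        vTaskDelay(1);
    }
}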
Using GraphEdit, I can add the filter to the editor and use it to render a video. When I create the instance via COM (using DirectShow.NET), the EnumPins method (followed by Next calls) returns no pins.
Is there a reason why GraphEdit would show the pins while I cannot get a reference to them via the COM interfaces?
EDIT: Here is the method I am using to get the first available pin (of any type). Nothing is ever returned for this filter, but I can see four pins (two in, two out) in GraphEdit.
public static IPin GetPins(IBaseFilter vSource, int iIndex)
{
    IEnumPins pins;
    var ppPins = new IPin[1];

    if (vSource == null)
        return null;

    DsError.ThrowExceptionForHR(vSource.EnumPins(out pins));
    try
    {
        while (pins.Next(1, ppPins, IntPtr.Zero) == 0)
        {
            return ppPins[0];
        }
    }
    finally
    {
        Marshal.ReleaseComObject(pins);
    }
    return null;
}
I have created an Ogg decoder in Media Foundation and I decode the Ogg data to PCM in IMFTransform::ProcessOutput, but I cannot play the PCM data. How do I play it?
This is my ProcessOutput code:
HRESULT OggDecoder::ProcessOutput(
    DWORD dwFlags,
    DWORD cOutputBufferCount,
    MFT_OUTPUT_DATA_BUFFER *pOutputSamples, // one per stream
    DWORD *pdwStatus)
{
    if (dwFlags != 0)
    {
        return E_INVALIDARG;
    }
    if (pOutputSamples == NULL || pdwStatus == NULL)
    {
        return E_POINTER;
    }
    // Must be exactly one output buffer.
    if (cOutputBufferCount != 1)
    {
        return E_INVALIDARG;
    }
    // It must contain a sample.
    if (pOutputSamples[0].pSample == NULL)
    {
        return E_INVALIDARG;
    }

    EnterCriticalSection(&m_critSec);

    HRESULT hr = S_OK;
    DWORD cbData = 0;
    IMFMediaBuffer *pOutput = NULL;

    // If we don't have an input sample, we need some input before
    // we can generate any output.
    if (!HasPendingOutput())
    {
        hr = MF_E_TRANSFORM_NEED_MORE_INPUT;
    }

    // Get the output buffer.
    if (SUCCEEDED(hr))
    {
        hr = pOutputSamples[0].pSample->GetBufferByIndex(0, &pOutput);
    }
    if (SUCCEEDED(hr))
    {
        hr = pOutput->GetMaxLength(&cbData);
    }
    if (SUCCEEDED(hr))
    {
        BYTE *pPCM = NULL;
        pOutput->Lock(&pPCM, NULL, NULL);
        GetPCMData(&pPCM); // decode audio data here
        // nLength, sampleTime and sampleDuration are produced by the
        // decoder (not shown here).
        pOutput->SetCurrentLength(nLength);
        pOutputSamples[0].pSample->SetSampleTime(sampleTime);
        pOutputSamples[0].pSample->SetSampleDuration(sampleDuration);
        pOutput->Unlock();
    }

    SafeRelease(&pOutput);
    LeaveCriticalSection(&m_critSec);
    return hr;
}
Am I missing something, or is something wrong with this code?
Thanks.
If you use topoedit.exe for debugging, it automatically adds a Resampler DMO, which converts PCM to float format.
You can write the player application and create the topology yourself, and then add the Resampler DMO node.
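For illustration, a hedged sketch of inserting the Audio Resampler DSP as a transform node while building such a topology yourself (pTopology, pDecoderNode and pRendererNode are assumed to exist already):

#include <wmcodecdsp.h> // CLSID_CResamplerMediaObject (Audio Resampler DSP)

IMFTopologyNode *pResamplerNode = NULL;
HRESULT hr = MFCreateTopologyNode(MF_TOPOLOGY_TRANSFORM_NODE, &pResamplerNode);
if (SUCCEEDED(hr))
    hr = pResamplerNode->SetGUID(MF_TOPONODE_TRANSFORM_OBJECTID,
                                 CLSID_CResamplerMediaObject);
if (SUCCEEDED(hr))
    hr = pTopology->AddNode(pResamplerNode);
// Wire it between the decoder and the audio renderer nodes.
if (SUCCEEDED(hr))
    hr = pDecoderNode->ConnectOutput(0, pResamplerNode, 0);
if (SUCCEEDED(hr))
    hr = pResamplerNode->ConnectOutput(0, pRendererNode, 0);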
I have a DirectShow filter which takes an input, processes it, and delivers the result to its output pin.
I want to write this filter's output data to a file, and I want to do it inside the filter class, so I need to get at the output pin's buffer data.
In short: how do I reach the final data on the output pin from within its filter? How can I do it?
Note: The output pin is derived from CBaseOutputPin. This is an open-source filter; it "magically" :-) puts the right data on its output pin, and I have not figured out how yet...
Update:
Here is the situation:
Media Source ----> GFilter ----> FileWriter
I have the source code of GFilter... I have no source code of the FileWriter... What I want is to make GFilter write out its own data. I debugged GFilter to get some insight into how it transforms the data, but my attempts to write this data out produced wrong data... So for now I just want to get the data at its output pin...
Update[2]:
Somewhere in the filter's output pin, the filter writer assigns the file writer's pin to an IStreamPtr variable... Everything seems to be written through a variable m_pIStream, which is of type IStreamPtr:
HRESULT GFilterOutput::CompleteConnect(IPin *pReceivePin)
{
    // make sure that this is the file writer, supporting
    // IStream, or we will not be able to write out the metadata
    // at stop time

    // m_pIStream is of type IStreamPtr
    m_pIStream = pReceivePin;
    if (m_pIStream == NULL)
    {
        return E_NOINTERFACE;
    }
    return CBaseOutputPin::CompleteConnect(pReceivePin);
}
...
HRESULT GFilterOutput::Replace(LONGLONG pos, const BYTE* pBuffer, long cBytes)
{
    //OutputDebugStringA("DEBUG: Now at MuxOutput Replace");

    // all media content is written when the graph is running,
    // using IMemInputPin. On stop (during our stop, but after the
    // file writer has stopped), we switch to IStream for the metadata.
    // The in-memory index is updated after a successful call to this
    // function, so any data not written on completion of Stop will
    // not be in the index.
    CAutoLock lock(&m_csWrite);

    HRESULT hr = S_OK;
    if (m_bUseIStream)
    {
        IStreamPtr pStream = GetConnected();
        if (m_pIStream == NULL)
        {
            hr = E_NOINTERFACE;
        }
        else
        {
            LARGE_INTEGER liTo;
            liTo.QuadPart = pos;
            ULARGE_INTEGER uliUnused;
            hr = m_pIStream->Seek(liTo, STREAM_SEEK_SET, &uliUnused);
            if (SUCCEEDED(hr))
            {
                ULONG cActual;
                hr = m_pIStream->Write(pBuffer, cBytes, &cActual);
                if (SUCCEEDED(hr) && ((long)cActual != cBytes))
                {
                    hr = E_FAIL;
                }
            }
        }
    }
    else
    {
        // where the buffer boundaries lie is not important in this
        // case, so break writes up into the buffers.
        while (cBytes && (hr == S_OK))
        {
            IMediaSamplePtr pSample;
            hr = GetDeliveryBuffer(&pSample, NULL, NULL, 0);
            if (SUCCEEDED(hr))
            {
                long cThis = min(pSample->GetSize(), cBytes);
                BYTE* pDest;
                pSample->GetPointer(&pDest);
                CopyMemory(pDest, pBuffer, cThis);
                pSample->SetActualDataLength(cThis);

                // time stamps indicate file position in bytes
                LONGLONG tStart = pos;
                LONGLONG tEnd = pos + cThis;
                pSample->SetTime(&tStart, &tEnd);

                hr = Deliver(pSample);
                if (SUCCEEDED(hr))
                {
                    pBuffer += cThis;
                    cBytes -= cThis;
                    pos += cThis;
                }
            }
        }
    }
    return hr;
}
You have the full source code; step through it with a debugger until you reach the point where your filter calls IPin::Receive on the peer downstream filter, then update/override the code there and you have full control, e.g. for writing the data into a file.
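For instance, a hedged sketch of mirroring the delivered bytes into a side file from within GFilterOutput::Replace, right where the sample has just been delivered (m_hDump is a hypothetical HANDLE member you would open with CreateFile when the pin becomes active):

// Inside the IMemInputPin branch of GFilterOutput::Replace, after
// hr = Deliver(pSample) has succeeded:
if (SUCCEEDED(hr))
{
    DWORD cbWritten = 0;
    // pBuffer/cThis are exactly the bytes just copied into the sample,
    // so this mirrors the same data the downstream file writer receives.
    WriteFile(m_hDump, pBuffer, (DWORD)cThis, &cbWritten, NULL);
}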