Discussion:
[gst-devel] Newbie help. Cannot output audio/video simultaneously (with DirectFB)
d***@iremo.com
2008-02-26 10:10:30 UTC
Hello,

I'm new to GStreamer and new to this mailing list, but I'm hoping
someone will try to help me ;)

I'm trying to play back MPEG files using DirectFB. The code below does
the job, but it won't let me output video and audio at the same time.
It's either audio only or video only.

What have I not understood?

I put the src and the demuxer in the pipeline, made two bins for audio
and video, and added ghost pads. Each branch works well on its own,
just not simultaneously.

I would appreciate any help.

Thanks,

Daniel



#include <string.h>
#include <directfb.h>
#include <gst/gst.h>

static IDirectFB *dfb = NULL;
static IDirectFBSurface *primary = NULL;
static GMainLoop *loop;

GstElement *pipeline, *bin_audio, *bin_video, *source, *parser,
    *decoder_audio, *decoder_video,
    *convert_audio, *convert_video, *sink_audio, *sink_video;

GstPad *pad_video, *pad_audio;

#define DFBCHECK(x...) \
{ \
  DFBResult err = x; \
  \
  if (err != DFB_OK) \
  { \
    fprintf (stderr, "%s <%d>:\n\t", __FILE__, __LINE__); \
    DirectFBErrorFatal (#x, err); \
  } \
}

static gboolean
get_me_out (gpointer data)
{
  g_main_loop_quit (loop);
  return FALSE;
}


static void
new_pad (GstElement *element,
    GstPad *pad, /* src pad just added by the demuxer */
    gpointer data)
{
  gchar *name;
  name = gst_pad_get_name (pad);

  if (NULL != strstr (name, "video"))
  {
    GstPad *sinkpad;
    g_print ("Dynamic pad created, linking parser/decoder '%s'\n", name);
    sinkpad = gst_element_get_pad (bin_video, "sink");
    gst_pad_link (pad, sinkpad);
    gst_object_unref (sinkpad);
  }

  if (NULL != strstr (name, "audio"))
  {
    GstPad *sinkpad;
    g_print ("Dynamic pad created, linking parser/decoder '%s'\n", name);
    sinkpad = gst_element_get_pad (bin_audio, "sink");
    gst_pad_link (pad, sinkpad);
    gst_object_unref (sinkpad);
  }

  /* gst_pad_get_name() returns a copy that must be freed */
  g_free (name);
}

int
main (int argc, char *argv[])
{
  DFBSurfaceDescription dsc;

  if (argc < 2)
  {
    g_printerr ("Usage: %s <mpeg file>\n", argv[0]);
    return 1;
  }

  /* Init both GStreamer and DirectFB */
  DFBCHECK (DirectFBInit (&argc, &argv));
  gst_init (&argc, &argv);

  loop = g_main_loop_new (NULL, FALSE);

  /* Create the DirectFB main context and set it to fullscreen */
  DFBCHECK (DirectFBCreate (&dfb));
  DFBCHECK (dfb->SetCooperativeLevel (dfb, DFSCL_FULLSCREEN));

  /* We want a double buffered primary surface */
  dsc.flags = DSDESC_CAPS;
  dsc.caps = DSCAPS_PRIMARY | DSCAPS_FLIPPING;

  DFBCHECK (dfb->CreateSurface (dfb, &dsc, &primary));

  // create elements
  pipeline = gst_pipeline_new (NULL);
  bin_audio = gst_bin_new (NULL);
  bin_video = gst_bin_new (NULL);

  source = gst_element_factory_make ("filesrc", NULL); // or videotestsrc
  parser = gst_element_factory_make ("mpegdemux", NULL); // or dvddemux, mpegparse, mpegvideoparse

  decoder_audio = gst_element_factory_make ("mad", NULL);
  decoder_video = gst_element_factory_make ("mpeg2dec", NULL);

  convert_audio = gst_element_factory_make ("audioconvert", NULL);
  convert_video = gst_element_factory_make ("ffmpegcolorspace", NULL);

  sink_audio = gst_element_factory_make ("alsasink", NULL);
  sink_video = gst_element_factory_make ("dfbvideosink", NULL);

  // that's the interesting part: giving the primary surface to dfbvideosink
  g_object_set (sink_video, "surface", primary, NULL);

  // set the filename property on the file source
  g_object_set (G_OBJECT (source), "location", argv[1], NULL);

  // add source and demuxer to the pipeline
  gst_bin_add_many (GST_BIN (pipeline), source, parser, NULL);

  // put the remaining elements in their bins
  gst_bin_add_many (GST_BIN (bin_video), decoder_video, convert_video,
      sink_video, NULL);
  gst_bin_add_many (GST_BIN (bin_audio), decoder_audio, convert_audio,
      sink_audio, NULL);

  // add a ghost pad to the audio bin
  pad_audio = gst_element_get_pad (decoder_audio, "sink");
  gst_element_add_pad (bin_audio, gst_ghost_pad_new ("sink", pad_audio));
  gst_object_unref (GST_OBJECT (pad_audio));

  // add a ghost pad to the video bin
  pad_video = gst_element_get_pad (decoder_video, "sink");
  gst_element_add_pad (bin_video, gst_ghost_pad_new ("sink", pad_video));
  gst_object_unref (GST_OBJECT (pad_video));

  // add the bins to the pipeline
  gst_bin_add_many (GST_BIN (pipeline), bin_video, bin_audio, NULL);

  // link together - note that we cannot link the demuxer to the decoders
  // yet, since its source pads only appear once the stream is parsed
  gst_element_link (source, parser);
  gst_element_link_many (decoder_video, convert_video, sink_video, NULL);
  gst_element_link_many (decoder_audio, convert_audio, sink_audio, NULL);

  g_signal_connect (parser, "pad-added", G_CALLBACK (new_pad), NULL);

  // Now set to playing and iterate.
  g_print ("Setting to PLAYING\n");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  g_print ("Running\n");

  // get us out after five seconds
  g_timeout_add (5000, get_me_out, NULL);
  g_main_loop_run (loop);

  // stop playback and clean up nicely
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);

  // free the main loop
  g_main_loop_unref (loop);

  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));

  /* Release DirectFB context and surface */
  primary->Release (primary);
  dfb->Release (dfb);

  return 0;
}
Thijs Vermeir
2008-02-26 10:30:47 UTC
Hello,

You have to add a queue element after the demuxer on both the audio
and the video branch.
Your pipeline must look like this:
gst-launch filesrc location=movie.mpg ! mpegdemux name=demuxer
demuxer. ! queue ! mpeg2dec ! ffmpegcolorspace ! dfbvideosink
demuxer. ! queue ! mad ! audioconvert ! alsasink
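
In your code that amounts to creating a queue per branch and putting the
ghost pad on the queue's sink pad instead of the decoder's. A rough,
untested sketch against your listing (same 0.10 calls you already use):

GstElement *queue_video = gst_element_factory_make ("queue", NULL);
GstElement *queue_audio = gst_element_factory_make ("queue", NULL);

/* the queue goes at the head of each bin */
gst_bin_add (GST_BIN (bin_video), queue_video);
gst_bin_add (GST_BIN (bin_audio), queue_audio);
gst_element_link (queue_video, decoder_video);
gst_element_link (queue_audio, decoder_audio);

/* ghost the queue's sink pad, so the demuxer links to the queue */
pad_video = gst_element_get_pad (queue_video, "sink");
gst_element_add_pad (bin_video, gst_ghost_pad_new ("sink", pad_video));
gst_object_unref (GST_OBJECT (pad_video));

pad_audio = gst_element_get_pad (queue_audio, "sink");
gst_element_add_pad (bin_audio, gst_ghost_pad_new ("sink", pad_audio));
gst_object_unref (GST_OBJECT (pad_audio));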

Gr,
Thijs
Danielkun
2008-02-26 13:38:04 UTC
Thijs Vermeir and Jason Gerard DeRose,

Thank you very much for your fast replies!
I finally got it to work! ;-)

Daniel
Jason Gerard DeRose
2008-02-26 10:33:34 UTC
Daniel,

You'll need to add a queue element. I believe this will work with just
one queue, but it would probably be best to use one on the video side
and another on the audio side, right after your demuxer.

In general, any time you tee, like coming out of a demuxer, you will
need to use the queue element. The same is also true if you are mixing,
like going into a muxer.
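
For example, the same shape with an explicit tee, one queue per branch
(fakesink just stands in for real sinks here):

gst-launch videotestsrc ! tee name=t t. ! queue ! fakesink t. ! queue ! fakesink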

I believe the reason for this is that different parts of the pipeline
will have different requirements for what makes up a frame of data to
process... but someone more knowledgeable, please correct me if I'm
wrong. ;)

Cheers,
Jason
d***@iremo.com
2008-02-27 03:34:49 UTC
Hello,

I have another question, this time regarding queues.

I succeeded in playing back an MPEG file with DirectFB, although the
audio and video are very choppy. I tried changing the properties of
the queues, but it doesn't seem to help.

What am I missing?
(I pasted my current code at the bottom)

To Thijs:
Thank you again for your help. You suggested:

gst-launch filesrc location=movie.mpg ! mpegdemux name=demuxer
demuxer. ! queue ! mpeg2dec ! ffmpegcolorspace ! dfbvideosink
demuxer. ! queue ! mad ! audioconvert ! alsasink
I was unsuccessful in executing the pipeline above, although changing
"demuxer." to "demuxer.video_00" got me a step further. I'm currently
getting the following error:

gstbasesrc.c(2165): get_base_src_loop(): /pipeline0/filesrc0:
streaming task paused, reason not-negotiated (-4)
gst-launch filesrc location=movie.mpg ! mpegdemux name=demuxer
demuxer.audio_00 ! queue ! mad ! audioconvert ! alsasink
The above (audio only) works just fine.
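
The combined form I am now trying, with both demuxer pads named
explicitly (pad names as reported for this particular file), is:

gst-launch filesrc location=movie.mpg ! mpegdemux name=demuxer \
demuxer.video_00 ! queue ! mpeg2dec ! ffmpegcolorspace ! dfbvideosink \
demuxer.audio_00 ! queue ! mad ! audioconvert ! alsasink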

Thank you,

Daniel


#include <string.h>
#include <directfb.h>
#include <gst/gst.h>

static IDirectFB *dfb = NULL;
static IDirectFBSurface *primary = NULL;
static GMainLoop *loop;

GstElement *pipeline, *source, *parser,
    *queue_audio, *decoder_audio, *convert_audio, *sink_audio,
    *queue_video, *decoder_video, *convert_video, *sink_video;

/* same error-checking macro as in the first listing; it is used below */
#define DFBCHECK(x...) \
{ \
  DFBResult err = x; \
  \
  if (err != DFB_OK) \
  { \
    fprintf (stderr, "%s <%d>:\n\t", __FILE__, __LINE__); \
    DirectFBErrorFatal (#x, err); \
  } \
}

static gboolean
get_me_out (gpointer data)
{
  g_main_loop_quit (loop);
  return FALSE;
}

static void
new_pad (GstElement *element, GstPad *pad, gpointer data)
{
  gchar *name;
  name = gst_pad_get_name (pad);

  if (NULL != strstr (name, "video"))
  {
    GstPad *sinkpad;
    g_print ("Dynamic pad created, linking parser/decoder '%s'\n", name);
    sinkpad = gst_element_get_pad (queue_video, "sink");
    gst_pad_link (pad, sinkpad); // src pad, sink pad
    gst_object_unref (sinkpad);
  }

  if (NULL != strstr (name, "audio"))
  {
    GstPad *sinkpad;
    g_print ("Dynamic pad created, linking parser/decoder '%s'\n", name);
    sinkpad = gst_element_get_pad (queue_audio, "sink");
    gst_pad_link (pad, sinkpad); // src pad, sink pad
    gst_object_unref (sinkpad);
  }

  /* gst_pad_get_name() returns a copy that must be freed */
  g_free (name);
}


int
main (int argc, char *argv[])
{
  DFBSurfaceDescription dsc;

  if (argc < 2)
  {
    g_printerr ("Usage: %s <mpeg file>\n", argv[0]);
    return 1;
  }

  // Init both GStreamer and DirectFB
  DFBCHECK (DirectFBInit (&argc, &argv));
  gst_init (&argc, &argv);

  loop = g_main_loop_new (NULL, FALSE);

  // Create the DirectFB main context and set it to fullscreen
  DFBCHECK (DirectFBCreate (&dfb));
  DFBCHECK (dfb->SetCooperativeLevel (dfb, DFSCL_FULLSCREEN));

  // We want a double buffered primary surface
  dsc.flags = DSDESC_CAPS;
  dsc.caps = DSCAPS_PRIMARY | DSCAPS_FLIPPING;

  DFBCHECK (dfb->CreateSurface (dfb, &dsc, &primary));

  // create elements
  pipeline = gst_pipeline_new (NULL);
  source = gst_element_factory_make ("filesrc", NULL); // or videotestsrc
  parser = gst_element_factory_make ("mpegdemux", NULL); // or dvddemux, mpegparse, mpegvideoparse

  queue_audio = gst_element_factory_make ("queue", NULL);
  queue_video = gst_element_factory_make ("queue", NULL);

  guint intval;
  guint64 int64val;

  g_object_set (G_OBJECT (queue_audio), "max-size-buffers", 400, NULL);
  g_object_set (G_OBJECT (queue_audio), "max-size-time", 2000000000, NULL);
  g_object_set (G_OBJECT (queue_audio), "max-size-bytes", 20000000, NULL);

  g_object_get (G_OBJECT (queue_audio), "max-size-buffers", &intval, NULL);
  g_print ("max-size-buffers:'%d'\n", intval);
  /* g_object_get (G_OBJECT (queue_audio), "max-size-time", &int64val, NULL);
     g_print ("max-size-time:'%llu'\n", int64val); */
  g_object_get (G_OBJECT (queue_audio), "max-size-bytes", &intval, NULL);
  g_print ("max-size-bytes:'%d'\n", intval);

  decoder_audio = gst_element_factory_make ("mad", NULL);
  decoder_video = gst_element_factory_make ("mpeg2dec", NULL);

  convert_audio = gst_element_factory_make ("audioconvert", NULL);
  convert_video = gst_element_factory_make ("ffmpegcolorspace", NULL);

  sink_audio = gst_element_factory_make ("alsasink", NULL);
  sink_video = gst_element_factory_make ("dfbvideosink", NULL);

  // that's the interesting part: giving the primary surface to dfbvideosink
  g_object_set (sink_video, "surface", primary, NULL);

  // set the filename property on the file source
  g_object_set (G_OBJECT (source), "location", argv[1], NULL);

  // add all elements to the pipeline
  gst_bin_add_many (GST_BIN (pipeline), source, parser,
      queue_video, decoder_video, convert_video, sink_video,
      queue_audio, decoder_audio, convert_audio, sink_audio, NULL);

  // link together - note that we cannot link the demuxer to the queues yet
  gst_element_link (source, parser);
  gst_element_link_many (queue_video, decoder_video, convert_video,
      sink_video, NULL);
  gst_element_link_many (queue_audio, decoder_audio, convert_audio,
      sink_audio, NULL);

  // add callback to the demuxer
  g_signal_connect (parser, "pad-added", G_CALLBACK (new_pad), NULL);

  // Now set to playing and iterate.
  g_print ("Setting to PLAYING\n");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  g_print ("Running\n");

  // get us out after ten seconds
  g_timeout_add (10000, get_me_out, NULL);
  g_main_loop_run (loop);

  // stop playback and clean up nicely
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);

  // free the main loop
  g_main_loop_unref (loop);

  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));

  // release DirectFB context and surface
  primary->Release (primary);
  dfb->Release (dfb);

  return 0;
}
d***@iremo.com
2008-02-27 05:01:16 UTC
Hello again,

I sent an email earlier asking why the video/audio was choppy.
It turned out that I hadn't set the queue properties correctly. (I was
a little bit too excited.)
Sorry. The video/audio plays back just fine now.
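
In case anyone else hits this, here is a sketch of the kind of
correction involved, assuming the culprit was the 64-bit queue
property: max-size-time is a guint64, and a plain int literal gets
read with the wrong width from the varargs of g_object_set():

/* assumption: pass guint64 properties as real 64-bit values */
g_object_set (G_OBJECT (queue_audio),
    "max-size-time", G_GUINT64_CONSTANT (2000000000), NULL);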

Thanks

Daniel

