Commit becfa065 authored by Mario Chirinos Colunga's avatar Mario Chirinos Colunga 💬

mario

parent 6b4a2e43
......@@ -19,148 +19,124 @@
#include <cstring>
using namespace std;
//------------------------------------------------------------------------------
/**
 * Constructor
 * @param source streaming url
 * @param time split recording in segments of time seconds.
 */
StreamRecorder::StreamRecorder(const char* source, int time) {
	// Round the requested time up to a whole number of READSIZE frames,
	// then recompute the effective segment length from that frame count.
	// NOTE(review): if time, STREAMRECORDER_SAMPLERATE and READSIZE are all
	// integers, the division inside ceil() truncates before ceil runs —
	// confirm the intended precision.
	int nFrames = ceil(time*STREAMRECORDER_SAMPLERATE/READSIZE);
	recordTime = nFrames*READSIZE/STREAMRECORDER_SAMPLERATE;
	cout << "record time: " << recordTime << endl;
	// Buffer sized to hold one full recording segment of raw audio.
	bufferSize = nFrames*READSIZE*STREAMRECORDER_BYTESPERSAMPLE;
	audioBuffer = new unsigned char[bufferSize];
	memset(audioBuffer, 0, bufferSize);
	// Write cursor starts at the beginning of the buffer.
	audioBufferPosition = audioBuffer;
	// Count of bytes stored so far in the current segment.
	nBytes = 0;
	// Flag tracking whether the stream connection has been lost.
	isConnectionLost = false;
	// Build the GStreamer pipeline, then connect to the source URI.
	createMainPipeline();
	connect(source);
}
//------------------------------------------------------------------------------
/**
* Connect to the stream
* @param uri streaing uri
* @return unimplemented
*/
int StreamRecorder::connect(const char *uri)
{
disconnect();
cout << "connecting to " << uri << endl;
int StreamRecorder::connect(const char *uri) {
disconnect();
cout << "connecting to " << uri << endl;
// Uri destiny
pluginUri = (char*)uri;
gst_element_set_state (mainPipeline, GST_STATE_NULL);
gst_element_set_state (mainPipeline, GST_STATE_READY);
// Assign the uri property
g_object_set (G_OBJECT (streamSrc), "uri", uri, NULL);
gst_element_link (streamSrc, audioConvert);
// Main pipeline starts to work
gst_element_set_state (mainPipeline, GST_STATE_PLAYING);
return 0;
pluginUri = (char*)uri;
gst_element_set_state (mainPipeline, GST_STATE_NULL);
gst_element_set_state (mainPipeline, GST_STATE_READY);
g_object_set (G_OBJECT (streamSrc), "uri", uri, NULL);
gst_element_link (streamSrc, audioConvert);
gst_element_set_state (mainPipeline, GST_STATE_PLAYING);
return 0;
}
//------------------------------------------------------------------------------
/**
* disconnect from the stream
* @return unimplemented
*/
int StreamRecorder::disconnect() {
int StreamRecorder::disconnect()
{
gst_element_unlink (streamSrc, audioConvert);
gst_element_set_state (mainPipeline, GST_STATE_NULL);
return 0;
}
//------------------------------------------------------------------------------
/**
* Create main pipeline
* @return 0 on success else on error
*/
int StreamRecorder::createMainPipeline()
{
mainPipeline = gst_pipeline_new("stream-recorder");
GstBus* bus;
bus = gst_pipeline_get_bus(GST_PIPELINE (mainPipeline));
gst_bus_add_watch(bus, bus_callback, this);
gst_object_unref(GST_OBJECT (bus));
streamSrc = gst_element_factory_make("uridecodebin", "stream_source");
audioConvert = gst_element_factory_make ("audioconvert", "audio_convert");
//audioResample = gst_element_factory_make ("audioresample", "audio_resample");
filterCaps = gst_element_factory_make("capsfilter", "filter_cap");
GstCaps *fc = gst_caps_new_full(gst_structure_new ("audio/x-raw",
"channels", G_TYPE_INT, 1,
"rate", G_TYPE_INT, STREAMRECORDER_SAMPLERATE,
"format", G_TYPE_STRING, "S16LE",
"signed", G_TYPE_BOOLEAN, 1, //MUST BE SIGNED
"endianness", G_TYPE_INT, 1234,
NULL),
NULL);
g_object_set(G_OBJECT (filterCaps), "caps", fc, NULL);
queue0 = gst_element_factory_make("queue", "queue0");
filter = gst_element_factory_make("identity", "audio_filter");
g_signal_connect(filter, "handoff", G_CALLBACK (filter_handoff_callback), this);
queue1 = gst_element_factory_make("queue", "queue1");
fakeSink = gst_element_factory_make("fakesink", "fake_sink");
//audioSink = gst_element_factory_make("autoaudiosink", "speaker");
gst_bin_add_many (GST_BIN (mainPipeline), streamSrc, audioConvert, filterCaps, queue0, filter,queue1, fakeSink, NULL);
if(!gst_element_link_many(audioConvert, filterCaps, queue0, filter, queue1, fakeSink, NULL))
{
//gst_bin_add_many (GST_BIN (mainPipeline), streamSrc, audioConvert, filterCaps, queue0, filter, queue1, audioSink, NULL);
// if(!gst_element_link_many(audioConvert, filterCaps, queue0, filter, queue1, audioSink, NULL)){
int StreamRecorder::createMainPipeline() {
mainPipeline = gst_pipeline_new("stream-recorder");
GstBus* bus;
bus = gst_pipeline_get_bus(GST_PIPELINE (mainPipeline));
gst_bus_add_watch(bus, bus_callback, this);
gst_object_unref(GST_OBJECT (bus));
streamSrc = gst_element_factory_make("uridecodebin", "stream_source");
audioConvert = gst_element_factory_make ("audioconvert", "audio_convert");
//audioResample = gst_element_factory_make ("audioresample", "audio_resample");
filterCaps = gst_element_factory_make("capsfilter", "filter_cap");
GstCaps *fc = gst_caps_new_full(gst_structure_new ("audio/x-raw",
"channels", G_TYPE_INT, 1,
"rate", G_TYPE_INT, STREAMRECORDER_SAMPLERATE,
"format", G_TYPE_STRING, "S16LE",
"signed", G_TYPE_BOOLEAN, 1, //MUST BE SIGNED
"endianness", G_TYPE_INT, 1234,
NULL),
NULL);
g_object_set(G_OBJECT (filterCaps), "caps", fc, NULL);
queue0 = gst_element_factory_make("queue", "queue0");
filter = gst_element_factory_make("identity", "audio_filter");
g_signal_connect(filter, "handoff", G_CALLBACK (filter_handoff_callback), this);
queue1 = gst_element_factory_make("queue", "queue1");
fakeSink = gst_element_factory_make("fakesink", "fake_sink");
//audioSink = gst_element_factory_make("autoaudiosink", "speaker");
gst_bin_add_many (GST_BIN (mainPipeline), streamSrc, audioConvert, filterCaps, queue0, filter,queue1, fakeSink, NULL);
if(!gst_element_link_many(audioConvert, filterCaps, queue0, filter, queue1, fakeSink, NULL)){
//gst_bin_add_many (GST_BIN (mainPipeline), streamSrc, audioConvert, filterCaps, queue0, filter, queue1, audioSink, NULL);
// if(!gst_element_link_many(audioConvert, filterCaps, queue0, filter, queue1, audioSink, NULL)){
cerr << "mainPipeline: Failed to link elements in the pipeline" << endl;
exit(0);
cerr << "mainPipeline: Failed to link elements in the pipeline" << endl;
exit(0);
}
g_signal_connect(streamSrc, "pad-added", G_CALLBACK(srcNewPad_callback), this);
gst_element_set_state (mainPipeline, GST_STATE_NULL);
return 0;
}
g_signal_connect(streamSrc, "pad-added", G_CALLBACK(srcNewPad_callback), this);
gst_element_set_state (mainPipeline, GST_STATE_NULL);
return 0;
}
//------------------------------------------------------------------------------
/**
 * g_timeout callback that retries the connection with the saved URI.
 * @param instance the StreamRecorder ("this" passed through the C callback)
 * @return FALSE so GLib removes the timeout after a single attempt
 */
gboolean StreamRecorder::reconnectURIStream(void *instance) {
	cout << "Restarting the main pipeline" << endl;
	((StreamRecorder*)instance)->connect(((StreamRecorder*)instance)->pluginUri);
	return FALSE;
}
//------------------------------------------------------------------------------
/**
 * GStreamer bus watch: handles EOS and error messages from the pipeline.
 * @param bus the GstBus that sent the message
 * @param message the message posted on the bus
 * @param user_data the StreamRecorder instance
 * @return TRUE to keep the watch installed
 */
int StreamRecorder::bus_callback (GstBus *bus, GstMessage *message, void *user_data) {
	//printf("StreamRecorder got %s message\n", GST_MESSAGE_TYPE_NAME (message));
	StreamRecorder* self = (StreamRecorder*)user_data;
	// EOS on a live stream is treated as a lost connection.
	if(GST_MESSAGE_TYPE (message) == GST_MESSAGE_EOS)
		self->isConnectionLost = true;
	switch (GST_MESSAGE_TYPE (message)) {
		case GST_MESSAGE_EOS:
			cout << "End of stream" << endl;
			cout << "End sometimes src" << endl;
			break;
		case GST_MESSAGE_ERROR:
		{	// Braced scope: locals must not cross into the other cases.
			gchar *debug;
			GError *error;
			gst_message_parse_error (message, &error, &debug);
			g_free (debug);
			cerr << "Error: "<< error->message << endl; // Print specific error
			g_error_free (error);
			// Try to reconnect with the uri stream after 60 seconds.
			g_timeout_add(60*1000, reconnectURIStream, user_data);
			// Flush what was recorded so far, only if the connection was lost.
			if(self->isConnectionLost) {
				savePartialBuffer(user_data);
				self->isConnectionLost = false;
			}
			break;
		}
		default:
			break;
	}
	return TRUE;
}
//------------------------------------------------------------------------------
void StreamRecorder::savePartialBuffer(void *user_data){
// Calculate the number of bytes that are missing for reach the max buffer size
// The buffer size is obtained based on the maximum recording time
int missingBytes = ((StreamRecorder*)user_data)->bufferSize - ((StreamRecorder*)user_data)->nBytes;
// Put the buffer position in the last position of the buffer
((StreamRecorder*)user_data)->audioBufferPosition+=missingBytes;
// Save in flac format
((StreamRecorder*)user_data)->compressBuffer();
// Restart the possition pointer
((StreamRecorder*)user_data)->audioBufferPosition=((StreamRecorder*)user_data)->audioBuffer;
// Restart the buffer default values
memset (((StreamRecorder*)user_data)->audioBuffer, 0, ((StreamRecorder*)user_data)->bufferSize);
((StreamRecorder*)user_data)->nBytes=0;
void StreamRecorder::savePartialBuffer(void *user_data)
{
int missingBytes = ((StreamRecorder*)user_data)->bufferSize - ((StreamRecorder*)user_data)->nBytes;
((StreamRecorder*)user_data)->audioBufferPosition+=missingBytes;
((StreamRecorder*)user_data)->compressBuffer();
((StreamRecorder*)user_data)->audioBufferPosition=((StreamRecorder*)user_data)->audioBuffer;
memset (((StreamRecorder*)user_data)->audioBuffer, 0, ((StreamRecorder*)user_data)->bufferSize);
((StreamRecorder*)user_data)->nBytes=0;
}
//------------------------------------------------------------------------------
/**
 * CallBack to link the pads created by uridecodebin
 * @param element The uridecodebin element
 * @param pad The pad added
 * @param data this
 */
void StreamRecorder::srcNewPad_callback(GstElement *element, GstPad *pad, void *data) {
	cout << gst_element_get_name(element)<< " adding pad.." << gst_pad_get_name (pad) << endl;
	cout << "Pad Name: " << gst_pad_get_name (pad) << endl;
	//gst_pad_get_caps is for gst v0.1
	// GstCaps *new_pad_caps = gst_pad_get_caps (pad);
	GstCaps *new_pad_caps = gst_pad_query_caps(pad, NULL);
	GstStructure *new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
	const gchar *new_pad_type = gst_structure_get_name (new_pad_struct);
	// Only raw audio pads are linked into the recording branch;
	// anything else (video, compressed audio) is ignored.
	if (g_str_has_prefix (new_pad_type, "audio/x-raw")) {
		cout << "linking " << new_pad_type << endl;
		GstElement *nextElement = ((StreamRecorder*)data)->audioConvert;
		GstPad *sinkpad = gst_element_get_static_pad (nextElement, "sink");
		if (GST_PAD_LINK_FAILED (gst_pad_link (pad, sinkpad))) {
			cerr << "Type is "<< new_pad_type <<" but link failed." << endl;
			exit(0);
		}
		else {
			cout <<"Link succeeded " << new_pad_type << endl;
		}
	}
}
//------------------------------------------------------------------------------
/**
* Save audio data (audioBuffer) in flac format
* @return unimplemented
*/
int StreamRecorder::compressBuffer() {
long int currentTime = time(NULL);
stringstream ss;
string fileNameStr, currentTimeStr;
ss << currentTime << endl;
getline(ss, currentTimeStr);
ss << currentTimeStr << ".flac" << endl;
getline(ss, fileNameStr);
int readsize = READSIZE;
FLAC__bool ok = true;
FLAC__StreamEncoder *encoder = 0;
FLAC__StreamMetadata *metadata[2];
FLAC__StreamMetadata_VorbisComment_Entry entry;
FLAC__StreamEncoderInitStatus init_status;
/* allocate the encoder */
if((encoder = FLAC__stream_encoder_new()) == NULL) {
fprintf(stderr, "ERROR: allocating encoder\n");
return 1;
int StreamRecorder::compressBuffer()
{
long int currentTime = time(NULL);
stringstream ss;
string fileNameStr, currentTimeStr;
ss << currentTime << endl;
getline(ss, currentTimeStr);
ss << currentTimeStr << ".flac" << endl;
getline(ss, fileNameStr);
int readsize = READSIZE;
FLAC__bool ok = true;
FLAC__StreamEncoder *encoder = 0;
FLAC__StreamMetadata *metadata[2];
FLAC__StreamMetadata_VorbisComment_Entry entry;
FLAC__StreamEncoderInitStatus init_status;
/* allocate the encoder */
if((encoder = FLAC__stream_encoder_new()) == NULL)
{
fprintf(stderr, "ERROR: allocating encoder\n");
return 1;
}
ok &= FLAC__stream_encoder_set_verify(encoder, true);
ok &= FLAC__stream_encoder_set_compression_level(encoder, 5);
ok &= FLAC__stream_encoder_set_channels(encoder, 1);
ok &= FLAC__stream_encoder_set_bits_per_sample(encoder, STREAMRECORDER_BYTESPERSAMPLE*8);
ok &= FLAC__stream_encoder_set_sample_rate(encoder, STREAMRECORDER_SAMPLERATE);
ok &= FLAC__stream_encoder_set_total_samples_estimate(encoder, nBytes/STREAMRECORDER_BYTESPERSAMPLE);
ok &= FLAC__stream_encoder_set_verify(encoder, true);
ok &= FLAC__stream_encoder_set_compression_level(encoder, 5);
ok &= FLAC__stream_encoder_set_channels(encoder, 1);
ok &= FLAC__stream_encoder_set_bits_per_sample(encoder, STREAMRECORDER_BYTESPERSAMPLE*8);
ok &= FLAC__stream_encoder_set_sample_rate(encoder, STREAMRECORDER_SAMPLERATE);
ok &= FLAC__stream_encoder_set_total_samples_estimate(encoder, nBytes/STREAMRECORDER_BYTESPERSAMPLE);
/* now add some metadata; we'll add some tags and a padding block */
if(ok) {
if(
(metadata[0] = FLAC__metadata_object_new(FLAC__METADATA_TYPE_VORBIS_COMMENT)) == NULL ||
(metadata[1] = FLAC__metadata_object_new(FLAC__METADATA_TYPE_PADDING)) == NULL ||
/* there are many tag (vorbiscomment) functions but these are convenient for this particular use: */
!FLAC__metadata_object_vorbiscomment_entry_from_name_value_pair(&entry, "ARTIST", "artist") ||
!FLAC__metadata_object_vorbiscomment_append_comment(metadata[0], entry, /*copy=*/false) || /* copy=false: let metadata object take control of entry's allocated string */
!FLAC__metadata_object_vorbiscomment_entry_from_name_value_pair(&entry, "YEAR", "year") ||
!FLAC__metadata_object_vorbiscomment_append_comment(metadata[0], entry, /*copy=*/false)
)
/* now add some metadata; we'll add some tags and a padding block */
if(ok)
{
if(
(metadata[0] = FLAC__metadata_object_new(FLAC__METADATA_TYPE_VORBIS_COMMENT)) == NULL ||
(metadata[1] = FLAC__metadata_object_new(FLAC__METADATA_TYPE_PADDING)) == NULL ||
/* there are many tag (vorbiscomment) functions but these are convenient for this particular use: */
!FLAC__metadata_object_vorbiscomment_entry_from_name_value_pair(&entry, "ARTIST", "artist") ||
!FLAC__metadata_object_vorbiscomment_append_comment(metadata[0], entry, /*copy=*/false) || /* copy=false: let metadata object take control of entry's allocated string */
!FLAC__metadata_object_vorbiscomment_entry_from_name_value_pair(&entry, "YEAR", "year") ||
!FLAC__metadata_object_vorbiscomment_append_comment(metadata[0], entry, /*copy=*/false)
)
{
fprintf(stderr, "ERROR: out of memory or tag error\n");
ok = false;
}
metadata[1]->length = 1234; /* set the padding length */
ok = FLAC__stream_encoder_set_metadata(encoder, metadata, 2);
}
......@@ -347,87 +299,84 @@ int StreamRecorder::compressBuffer() {
}
}
int channels = 1;
int channels = 1;
// int bps = 16;
unsigned char* audioBufferTmp = audioBuffer;
unsigned char* buffer = new unsigned char[readsize*STREAMRECORDER_BYTESPERSAMPLE*channels];
int* pcm = new int[readsize*channels];
/* read blocks of samples from WAVE file and feed to encoder */
if(ok) {
int left = nBytes;
while(left&&ok) {
int need = (left>readsize? readsize : left);
memcpy(buffer, audioBufferTmp, need);//*sizeof(short));
audioBufferTmp+=need;//*sizeof(short);
// /* convert the packed little-endian 16-bit PCM samples from WAVE into an interleaved FLAC__int32 buffer for libFLAC */
for(int i=0; i < need*channels; i++) {
/* inefficient but simple and works on big- or little-endian machines */
pcm[i] = (FLAC__int32)(((FLAC__int16)(FLAC__int8)buffer[2*i+1] << 8) | (FLAC__int16)buffer[2*i]);
if(ok)
{
int left = nBytes;
while(left&&ok)
{
int need = (left>readsize? readsize : left);
memcpy(buffer, audioBufferTmp, need);//*sizeof(short));
audioBufferTmp+=need;//*sizeof(short);
// /* convert the packed little-endian 16-bit PCM samples from WAVE into an interleaved FLAC__int32 buffer for libFLAC */
for(int i=0; i < need*channels; i++)
{
/* inefficient but simple and works on big- or little-endian machines */
pcm[i] = (FLAC__int32)(((FLAC__int16)(FLAC__int8)buffer[2*i+1] << 8) | (FLAC__int16)buffer[2*i]);
}
/* feed samples to encoder */
ok = FLAC__stream_encoder_process_interleaved(encoder, pcm, need/STREAMRECORDER_BYTESPERSAMPLE);
left -= need;
}
}
else {
cout << "-ERROR-" << endl;
}
ok &= FLAC__stream_encoder_finish(encoder);
fprintf(stderr, "encoding: %s\n", ok? "succeeded" : "FAILED");
fprintf(stderr, " state: %s\n", FLAC__StreamEncoderStateString[FLAC__stream_encoder_get_state(encoder)]);
/* now that encoding is finished, the metadata can be freed */
FLAC__metadata_object_delete(metadata[0]);
FLAC__metadata_object_delete(metadata[1]);
FLAC__stream_encoder_delete(encoder);
else
cout << "-ERROR-" << endl;
delete[] buffer;
delete[] pcm;
return 0;
ok &= FLAC__stream_encoder_finish(encoder);
fprintf(stderr, "encoding: %s\n", ok? "succeeded" : "FAILED");
fprintf(stderr, " state: %s\n", FLAC__StreamEncoderStateString[FLAC__stream_encoder_get_state(encoder)]);
/* now that encoding is finished, the metadata can be freed */
FLAC__metadata_object_delete(metadata[0]);
FLAC__metadata_object_delete(metadata[1]);
FLAC__stream_encoder_delete(encoder);
delete[] buffer;
delete[] pcm;
return 0;
}
//------------------------------------------------------------------------------
/**
 * Add audio data to audioBuffer
 * @param data Audio data to add
 * @param length Data length
 * @return Bytes written so far in the current segment
 */
int StreamRecorder::addToBuffer(unsigned char* data, int length) {
	//cout << "addToBuffer(" << length << ")" << endl;
	int bytesRead = length;// READSIZE*STREAMRECORDER_BYTESPERSAMPLE;
	// NOTE(review): no bounds check — if nBytes+length exceeds bufferSize
	// this memcpy overruns audioBuffer; confirm upstream buffer sizes.
	memcpy((char*)audioBufferPosition, (char*)data, bytesRead);
	cout << *audioBufferPosition << endl;
	audioBufferPosition += bytesRead;
	nBytes += bytesRead;//READSIZE;
	cout << "Bytes readed " << nBytes << endl;
	cout << "Buffer size " << bufferSize << endl;
	// When a whole segment has been collected, compress it and reset
	// the buffer state for the next segment.
	if(nBytes >= bufferSize) {
		compressBuffer();
		audioBufferPosition = audioBuffer;
		memset (audioBuffer, 0, bufferSize);
		nBytes = 0;
	}
	return nBytes;
}
//------------------------------------------------------------------------------
/**
 * CallBack for handoff signal of identity filter: captures every buffer
 * flowing through the pipeline and appends it to the audio buffer.
 * @param filter Identity filter
 * @param buffer The buffer passing through the filter
 * @param user_data this
 * @return 0 (unimplemented)
 */
int StreamRecorder::filter_handoff_callback(GstElement* filter, GstBuffer* buffer, void* user_data) {
	cout << "LLEGO INFO" << endl;
	//GST_BUFFER_DATA is for gst v0.1
	// ((StreamRecorder*)user_data)->addToBuffer((unsigned char*)GST_BUFFER_DATA (buffer));
	GstMapInfo info;
	if(!gst_buffer_map (buffer, &info, GST_MAP_READ)) {
		cout << "ERROR: MAPPING IS NOT VALID" << endl;
		// Bail out: info is not valid when the map failed, so it must not
		// be read or unmapped.
		return 0;
	}
	// user_data is the StreamRecorder instance.
	((StreamRecorder*)user_data)->addToBuffer((unsigned char*)info.data, info.size);
	gst_buffer_unmap (buffer, &info);
	return 0;
}
//------------------------------------------------------------------------------
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment