Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
GStreamer_audioRecorder
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
m3
GStreamer_audioRecorder
Commits
6b4a2e43
Commit
6b4a2e43
authored
Aug 18, 2017
by
d.basulto
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
add format
parent
820dc52c
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
172 additions
and
182 deletions
+172
-182
StreamRecorder.cpp
StreamRecorder.cpp
+172
-182
No files found.
StreamRecorder.cpp
View file @
6b4a2e43
...
...
@@ -27,34 +27,34 @@ using namespace std;
*/
/**
 * Builds a StreamRecorder that captures roughly `time` seconds of audio
 * from `source` into an in-memory PCM buffer, then connects the pipeline.
 *
 * @param source URI of the stream to record from.
 * @param time   desired recording length in seconds.
 */
StreamRecorder::StreamRecorder(const char *source, int time) {
    // Number of READSIZE-sample frames needed to cover `time` seconds.
    // Promote to double BEFORE dividing: the original expression did the
    // division on integers, so ceil() received an already-truncated value
    // and the requested duration was silently rounded down.
    int nFrames = (int)ceil((double)time * STREAMRECORDER_SAMPLERATE / READSIZE);

    // Effective record time after rounding to whole frames.
    recordTime = nFrames * READSIZE / STREAMRECORDER_SAMPLERATE;
    cout << "record time: " << recordTime << endl;

    // Total buffer size in bytes.
    bufferSize = nFrames * READSIZE * STREAMRECORDER_BYTESPERSAMPLE;

    // Buffer that is going to contain the raw audio samples.
    audioBuffer = new unsigned char[bufferSize];

    // Zero-fill so a partial save never exposes stale heap contents.
    memset(audioBuffer, 0, bufferSize);

    // Write cursor starts at the beginning of the buffer.
    audioBufferPosition = audioBuffer;

    // Count of bytes captured so far.
    nBytes = 0;

    // Flag that tracks the connection state.
    isConnectionLost = false;

    // Create the pipeline elements and set their capabilities.
    createMainPipeline();

    // Connect to the URI.
    connect(source);
}
//------------------------------------------------------------------------------
...
...
@@ -65,24 +65,24 @@ StreamRecorder::StreamRecorder(const char* source, int time) {
*/
/**
 * Points the recorder at a new stream URI and starts playback.
 *
 * Tears down any previous connection, resets the pipeline through NULL to
 * READY, assigns the URI to the source element, and finally sets the
 * pipeline to PLAYING.
 *
 * @param uri stream URI to connect to.
 * @return 0 always.
 */
int StreamRecorder::connect(const char *uri) {
    disconnect();

    cout << "connecting to " << uri << endl;

    // Remember the destination URI.
    pluginUri = (char *)uri;

    // Drop to NULL, then bring the pipeline up to READY before reconfiguring.
    gst_element_set_state(mainPipeline, GST_STATE_NULL);
    gst_element_set_state(mainPipeline, GST_STATE_READY);

    // Hand the URI to the source element.
    g_object_set(G_OBJECT(streamSrc), "uri", uri, NULL);

    // NOTE(review): streamSrc is a uridecodebin, whose source pads appear
    // dynamically; the real link presumably happens in the pad-added
    // callback and this static link likely fails — confirm before relying
    // on it.
    gst_element_link(streamSrc, audioConvert);

    // Main pipeline starts to work.
    gst_element_set_state(mainPipeline, GST_STATE_PLAYING);

    return 0;
}
//------------------------------------------------------------------------------
...
...
@@ -106,18 +106,18 @@ int StreamRecorder::disconnect() {
int
StreamRecorder
::
createMainPipeline
()
{
mainPipeline
=
gst_pipeline_new
(
"stream-recorder"
);
GstBus
*
bus
;
bus
=
gst_pipeline_get_bus
(
GST_PIPELINE
(
mainPipeline
));
gst_bus_add_watch
(
bus
,
bus_callback
,
this
);
gst_object_unref
(
GST_OBJECT
(
bus
));
streamSrc
=
gst_element_factory_make
(
"uridecodebin"
,
"stream_source"
);
audioConvert
=
gst_element_factory_make
(
"audioconvert"
,
"audio_convert"
);
//audioResample = gst_element_factory_make ("audioresample", "audio_resample");
filterCaps
=
gst_element_factory_make
(
"capsfilter"
,
"filter_cap"
);
GstCaps
*
fc
=
gst_caps_new_full
(
gst_structure_new
(
"audio/x-raw"
,
GstBus
*
bus
;
bus
=
gst_pipeline_get_bus
(
GST_PIPELINE
(
mainPipeline
));
gst_bus_add_watch
(
bus
,
bus_callback
,
this
);
gst_object_unref
(
GST_OBJECT
(
bus
));
streamSrc
=
gst_element_factory_make
(
"uridecodebin"
,
"stream_source"
);
audioConvert
=
gst_element_factory_make
(
"audioconvert"
,
"audio_convert"
);
//audioResample = gst_element_factory_make ("audioresample", "audio_resample");
filterCaps
=
gst_element_factory_make
(
"capsfilter"
,
"filter_cap"
);
GstCaps
*
fc
=
gst_caps_new_full
(
gst_structure_new
(
"audio/x-raw"
,
"channels"
,
G_TYPE_INT
,
1
,
"rate"
,
G_TYPE_INT
,
STREAMRECORDER_SAMPLERATE
,
"format"
,
G_TYPE_STRING
,
"S16LE"
,
...
...
@@ -126,27 +126,27 @@ int StreamRecorder::createMainPipeline() {
NULL
),
NULL
);
g_object_set
(
G_OBJECT
(
filterCaps
),
"caps"
,
fc
,
NULL
);
queue0
=
gst_element_factory_make
(
"queue"
,
"queue0"
);
filter
=
gst_element_factory_make
(
"identity"
,
"audio_filter"
);
g_signal_connect
(
filter
,
"handoff"
,
G_CALLBACK
(
filter_handoff_callback
),
this
);
queue1
=
gst_element_factory_make
(
"queue"
,
"queue1"
);
fakeSink
=
gst_element_factory_make
(
"fakesink"
,
"fake_sink"
);
//audioSink = gst_element_factory_make("autoaudiosink", "speaker");
gst_bin_add_many
(
GST_BIN
(
mainPipeline
),
streamSrc
,
audioConvert
,
filterCaps
,
queue0
,
filter
,
queue1
,
fakeSink
,
NULL
);
if
(
!
gst_element_link_many
(
audioConvert
,
filterCaps
,
queue0
,
filter
,
queue1
,
fakeSink
,
NULL
)){
//gst_bin_add_many (GST_BIN (mainPipeline), streamSrc, audioConvert, filterCaps, queue0, filter, queue1, audioSink, NULL);
//if(!gst_element_link_many(audioConvert, filterCaps, queue0, filter, queue1, audioSink, NULL)){
g_object_set
(
G_OBJECT
(
filterCaps
),
"caps"
,
fc
,
NULL
);
queue0
=
gst_element_factory_make
(
"queue"
,
"queue0"
);
filter
=
gst_element_factory_make
(
"identity"
,
"audio_filter"
);
g_signal_connect
(
filter
,
"handoff"
,
G_CALLBACK
(
filter_handoff_callback
),
this
);
cerr
<<
"mainPipeline: Failed to link elements in the pipeline"
<<
endl
;
queue1
=
gst_element_factory_make
(
"queue"
,
"queue1"
);
fakeSink
=
gst_element_factory_make
(
"fakesink"
,
"fake_sink"
);
//audioSink = gst_element_factory_make("autoaudiosink", "speaker");
gst_bin_add_many
(
GST_BIN
(
mainPipeline
),
streamSrc
,
audioConvert
,
filterCaps
,
queue0
,
filter
,
queue1
,
fakeSink
,
NULL
);
if
(
!
gst_element_link_many
(
audioConvert
,
filterCaps
,
queue0
,
filter
,
queue1
,
fakeSink
,
NULL
)){
//gst_bin_add_many (GST_BIN (mainPipeline), streamSrc, audioConvert, filterCaps, queue0, filter, queue1, audioSink, NULL);
// if(!gst_element_link_many(audioConvert, filterCaps, queue0, filter, queue1, audioSink, NULL)){
cerr
<<
"mainPipeline: Failed to link elements in the pipeline"
<<
endl
;
exit
(
0
);
}
g_signal_connect
(
streamSrc
,
"pad-added"
,
G_CALLBACK
(
srcNewPad_callback
),
this
);
gst_element_set_state
(
mainPipeline
,
GST_STATE_NULL
);
g_signal_connect
(
streamSrc
,
"pad-added"
,
G_CALLBACK
(
srcNewPad_callback
),
this
);
gst_element_set_state
(
mainPipeline
,
GST_STATE_NULL
);
return
0
;
}
...
...
@@ -170,15 +170,14 @@ gboolean StreamRecorder::reconnectURIStream(void *instance){
*/
int
StreamRecorder
::
bus_callback
(
GstBus
*
bus
,
GstMessage
*
message
,
void
*
user_data
)
{
//printf("StreamRecorder got %s message\n", GST_MESSAGE_TYPE_NAME (message));
if
(
GST_MESSAGE_TYPE
(
message
)
==
GST_MESSAGE_EOS
){
((
StreamRecorder
*
)
user_data
)
->
isConnectionLost
=
true
;
}
switch
(
GST_MESSAGE_TYPE
(
message
))
{
switch
(
GST_MESSAGE_TYPE
(
message
))
{
case
GST_MESSAGE_EOS
:
...
...
@@ -228,7 +227,7 @@ void StreamRecorder::savePartialBuffer(void *user_data){
// Put the buffer position in the last position of the buffer
((
StreamRecorder
*
)
user_data
)
->
audioBufferPosition
+=
missingBytes
;
// Save in flac format
// Save in flac format
((
StreamRecorder
*
)
user_data
)
->
compressBuffer
();
// Restart the possition pointer
...
...
@@ -250,34 +249,33 @@ void StreamRecorder::savePartialBuffer(void *user_data){
*/
/**
 * "pad-added" signal callback: links a newly exposed raw-audio pad of the
 * source element to the sink pad of the recorder's audioconvert element.
 * Non-audio pads are ignored.
 *
 * @param element element that created the pad (the stream source).
 * @param pad     the freshly added pad.
 * @param data    opaque pointer to the owning StreamRecorder.
 */
void StreamRecorder::srcNewPad_callback(GstElement *element, GstPad *pad, void *data) {
    cout << gst_element_get_name(element) << " adding pad.." << gst_pad_get_name(pad) << endl;
    cout << "Pad Name: " << gst_pad_get_name(pad) << endl;

    // gst_pad_get_caps is for gst v0.1
    //GstCaps *new_pad_caps = gst_pad_get_caps (pad);
    GstCaps *new_pad_caps = gst_pad_query_caps(pad, NULL);
    GstStructure *new_pad_struct = gst_caps_get_structure(new_pad_caps, 0);
    const gchar *new_pad_type = gst_structure_get_name(new_pad_struct);

    if (g_str_has_prefix(new_pad_type, "audio/x-raw")) {
        cout << "linking " << new_pad_type << endl;

        GstElement *nextElement = ((StreamRecorder *)data)->audioConvert;
        GstPad *sinkpad = gst_element_get_static_pad(nextElement, "sink");

        if (GST_PAD_LINK_FAILED(gst_pad_link(pad, sinkpad))) {
            cerr << "Type is " << new_pad_type << " but link failed." << endl;
            exit(0);
        } else {
            cout << "Link succeeded " << new_pad_type << endl;
        }

        // gst_element_get_static_pad() returns a new reference; the
        // original leaked it on every pad-added signal.
        gst_object_unref(sinkpad);
    }

    // gst_pad_query_caps() returns a new caps reference; the original
    // leaked it on every pad-added signal.
    gst_caps_unref(new_pad_caps);
}
//------------------------------------------------------------------------------
...
...
@@ -286,45 +284,44 @@ void StreamRecorder::srcNewPad_callback(GstElement *element, GstPad *pad, void *
* @return unimplemented
*/
int
StreamRecorder
::
compressBuffer
()
{
long
int
currentTime
=
time
(
NULL
);
stringstream
ss
;
string
fileNameStr
,
currentTimeStr
;
ss
<<
currentTime
<<
endl
;
getline
(
ss
,
currentTimeStr
);
ss
<<
currentTimeStr
<<
".flac"
<<
endl
;
getline
(
ss
,
fileNameStr
);
int
readsize
=
READSIZE
;
FLAC__bool
ok
=
true
;
FLAC__StreamEncoder
*
encoder
=
0
;
FLAC__StreamMetadata
*
metadata
[
2
];
FLAC__StreamMetadata_VorbisComment_Entry
entry
;
FLAC__StreamEncoderInitStatus
init_status
;
/* allocate the encoder */
if
((
encoder
=
FLAC__stream_encoder_new
())
==
NULL
)
{
fprintf
(
stderr
,
"ERROR: allocating encoder
\n
"
);
return
1
;
int
StreamRecorder
::
compressBuffer
()
{
long
int
currentTime
=
time
(
NULL
);
stringstream
ss
;
string
fileNameStr
,
currentTimeStr
;
ss
<<
currentTime
<<
endl
;
getline
(
ss
,
currentTimeStr
);
ss
<<
currentTimeStr
<<
".flac"
<<
endl
;
getline
(
ss
,
fileNameStr
);
int
readsize
=
READSIZE
;
FLAC__bool
ok
=
true
;
FLAC__StreamEncoder
*
encoder
=
0
;
FLAC__StreamMetadata
*
metadata
[
2
];
FLAC__StreamMetadata_VorbisComment_Entry
entry
;
FLAC__StreamEncoderInitStatus
init_status
;
/* allocate the encoder */
if
((
encoder
=
FLAC__stream_encoder_new
())
==
NULL
)
{
fprintf
(
stderr
,
"ERROR: allocating encoder
\n
"
);
return
1
;
}
ok
&=
FLAC__stream_encoder_set_verify
(
encoder
,
true
);
ok
&=
FLAC__stream_encoder_set_compression_level
(
encoder
,
5
);
ok
&=
FLAC__stream_encoder_set_channels
(
encoder
,
1
);
ok
&=
FLAC__stream_encoder_set_bits_per_sample
(
encoder
,
STREAMRECORDER_BYTESPERSAMPLE
*
8
);
ok
&=
FLAC__stream_encoder_set_sample_rate
(
encoder
,
STREAMRECORDER_SAMPLERATE
);
ok
&=
FLAC__stream_encoder_set_total_samples_estimate
(
encoder
,
nBytes
/
STREAMRECORDER_BYTESPERSAMPL
E
);
/* now add some metadata; we'll add some tags and a padding block */
if
(
ok
)
{
if
(
(
metadata
[
0
]
=
FLAC__metadata_object_new
(
FLAC__METADATA_TYPE_VORBIS_COMMENT
))
==
NULL
||
(
metadata
[
1
]
=
FLAC__metadata_object_new
(
FLAC__METADATA_TYPE_PADDING
))
==
NULL
||
/* there are many tag (vorbiscomment) functions but these are convenient for this particular use: */
!
FLAC__metadata_object_vorbiscomment_entry_from_name_value_pair
(
&
entry
,
"ARTIST"
,
"artist"
)
||
ok
&=
FLAC__stream_encoder_set_verify
(
encoder
,
true
);
ok
&=
FLAC__stream_encoder_set_compression_level
(
encoder
,
5
);
ok
&=
FLAC__stream_encoder_set_channels
(
encoder
,
1
);
ok
&=
FLAC__stream_encoder_set_bits_per_sample
(
encoder
,
STREAMRECORDER_BYTESPERSAMPLE
*
8
);
ok
&=
FLAC__stream_encoder_set_sample_rate
(
encoder
,
STREAMRECORDER_SAMPLERAT
E
);
ok
&=
FLAC__stream_encoder_set_total_samples_estimate
(
encoder
,
nBytes
/
STREAMRECORDER_BYTESPERSAMPLE
);
/* now add some metadata; we'll add some tags and a padding block */
if
(
ok
)
{
if
(
(
metadata
[
0
]
=
FLAC__metadata_object_new
(
FLAC__METADATA_TYPE_VORBIS_COMMENT
))
==
NULL
||
(
metadata
[
1
]
=
FLAC__metadata_object_new
(
FLAC__METADATA_TYPE_PADDING
))
==
NULL
||
/* there are many tag (vorbiscomment) functions but these are convenient for this particular use: */
!
FLAC__metadata_object_vorbiscomment_entry_from_name_value_pair
(
&
entry
,
"ARTIST"
,
"artist"
)
||
!
FLAC__metadata_object_vorbiscomment_append_comment
(
metadata
[
0
],
entry
,
/*copy=*/
false
)
||
/* copy=false: let metadata object take control of entry's allocated string */
!
FLAC__metadata_object_vorbiscomment_entry_from_name_value_pair
(
&
entry
,
"YEAR"
,
"year"
)
||
!
FLAC__metadata_object_vorbiscomment_append_comment
(
metadata
[
0
],
entry
,
/*copy=*/
false
)
...
...
@@ -349,53 +346,49 @@ int StreamRecorder::compressBuffer()
ok
=
false
;
}
}
int
channels
=
1
;
int
channels
=
1
;
// int bps = 16;
unsigned
char
*
audioBufferTmp
=
audioBuffer
;
unsigned
char
*
buffer
=
new
unsigned
char
[
readsize
*
STREAMRECORDER_BYTESPERSAMPLE
*
channels
];
int
*
pcm
=
new
int
[
readsize
*
channels
];
/* read blocks of samples from WAVE file and feed to encoder */
if
(
ok
)
{
int
left
=
nBytes
;
while
(
left
&&
ok
)
{
int
need
=
(
left
>
readsize
?
readsize
:
left
);
memcpy
(
buffer
,
audioBufferTmp
,
need
);
//*sizeof(short));
audioBufferTmp
+=
need
;
//*sizeof(short);
/* convert the packed little-endian 16-bit PCM samples from WAVE into an interleaved FLAC__int32 buffer for libFLAC */
for
(
int
i
=
0
;
i
<
need
*
channels
;
i
++
)
{
/* inefficient but simple and works on big- or little-endian machines */
pcm
[
i
]
=
(
FLAC__int32
)(((
FLAC__int16
)(
FLAC__int8
)
buffer
[
2
*
i
+
1
]
<<
8
)
|
(
FLAC__int16
)
buffer
[
2
*
i
]);
if
(
ok
)
{
int
left
=
nBytes
;
while
(
left
&&
ok
)
{
int
need
=
(
left
>
readsize
?
readsize
:
left
);
memcpy
(
buffer
,
audioBufferTmp
,
need
);
//*sizeof(short));
audioBufferTmp
+=
need
;
//*sizeof(short);
// /* convert the packed little-endian 16-bit PCM samples from WAVE into an interleaved FLAC__int32 buffer for libFLAC */
for
(
int
i
=
0
;
i
<
need
*
channels
;
i
++
)
{
/* inefficient but simple and works on big- or little-endian machines */
pcm
[
i
]
=
(
FLAC__int32
)(((
FLAC__int16
)(
FLAC__int8
)
buffer
[
2
*
i
+
1
]
<<
8
)
|
(
FLAC__int16
)
buffer
[
2
*
i
]);
}
/* feed samples to encoder */
ok
=
FLAC__stream_encoder_process_interleaved
(
encoder
,
pcm
,
need
/
STREAMRECORDER_BYTESPERSAMPLE
);
left
-=
need
;
}
}
else
{
cout
<<
"-ERROR-"
<<
endl
;
else
{
cout
<<
"-ERROR-"
<<
endl
;
}
ok
&=
FLAC__stream_encoder_finish
(
encoder
);
fprintf
(
stderr
,
"encoding: %s
\n
"
,
ok
?
"succeeded"
:
"FAILED"
);
fprintf
(
stderr
,
" state: %s
\n
"
,
FLAC__StreamEncoderStateString
[
FLAC__stream_encoder_get_state
(
encoder
)]);
/* now that encoding is finished, the metadata can be freed */
FLAC__metadata_object_delete
(
metadata
[
0
]);
FLAC__metadata_object_delete
(
metadata
[
1
]);
FLAC__stream_encoder_delete
(
encoder
);
delete
[]
buffer
;
delete
[]
pcm
;
return
0
;
ok
&=
FLAC__stream_encoder_finish
(
encoder
);
fprintf
(
stderr
,
"encoding: %s
\n
"
,
ok
?
"succeeded"
:
"FAILED"
);
fprintf
(
stderr
,
" state: %s
\n
"
,
FLAC__StreamEncoderStateString
[
FLAC__stream_encoder_get_state
(
encoder
)]);
/* now that encoding is finished, the metadata can be freed */
FLAC__metadata_object_delete
(
metadata
[
0
]);
FLAC__metadata_object_delete
(
metadata
[
1
]);
FLAC__stream_encoder_delete
(
encoder
);
delete
[]
buffer
;
delete
[]
pcm
;
return
0
;
}
...
...
@@ -410,29 +403,26 @@ int StreamRecorder::compressBuffer()
*/
/**
 * Appends `length` bytes of raw audio to the recording buffer; when the
 * buffer fills up, compresses its contents to FLAC and resets it.
 *
 * @param data   incoming raw sample bytes.
 * @param length number of bytes available in `data`.
 * @return number of bytes currently buffered (0 right after a flush).
 */
int StreamRecorder::addToBuffer(unsigned char *data, int length) {
    //cout << "addToBuffer(" << length << ")" << endl;

    // Clamp the copy to the space left in the buffer: the original memcpy'd
    // the full chunk unconditionally, which writes past the end of
    // audioBuffer (heap overflow) whenever an incoming chunk crosses the
    // buffer boundary.
    int available = bufferSize - nBytes;
    int bytesRead = (length < available) ? length : available; // READSIZE*STREAMRECORDER_BYTESPERSAMPLE;

    memcpy((char *)audioBufferPosition, (char *)data, bytesRead);
    cout << *audioBufferPosition << endl;

    audioBufferPosition += bytesRead;
    nBytes += bytesRead; //READSIZE;

    cout << "Bytes readed " << nBytes << endl;
    cout << "Buffer size " << bufferSize << endl;

    // Buffer full: compress what we have and start over from the beginning.
    if (nBytes >= bufferSize) {
        compressBuffer();
        audioBufferPosition = audioBuffer;
        memset(audioBuffer, 0, bufferSize);
        nBytes = 0;
    }

    return nBytes;
}
...
...
@@ -446,21 +436,21 @@ int StreamRecorder::addToBuffer(unsigned char* data, int length) {
* @return unimplemented
*/
/**
 * "handoff" signal callback from the identity element: maps each buffer
 * that flows through the pipeline and copies its bytes into the recorder's
 * accumulation buffer.
 *
 * @param filter    the identity element emitting the signal.
 * @param buffer    the GstBuffer being handed off.
 * @param user_data opaque pointer to the owning StreamRecorder.
 * @return 0 always.
 */
int StreamRecorder::filter_handoff_callback(GstElement *filter, GstBuffer *buffer, void *user_data) {
    cout << "LLEGO INFO" << endl;

    GstMapInfo info;
    if (!gst_buffer_map(buffer, &info, GST_MAP_READ)) {
        cout << "ERROR: MAPPING IS NOT VALID" << endl;
        // Bail out: the original fell through here, read info.data/info.size
        // from an uninitialized GstMapInfo, and then unmapped a buffer that
        // was never successfully mapped.
        return 0;
    }

    //GST_BUFFER_DATA is for gst v0.1
    //((StreamRecorder*)user_data)->addToBuffer((unsigned char*)GST_BUFFER_DATA (buffer));

    // user data is the class
    ((StreamRecorder *)user_data)->addToBuffer((unsigned char *)info.data, info.size);

    gst_buffer_unmap(buffer, &info);
    return 0;
}
//------------------------------------------------------------------------------
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment