diff --git a/meson.build b/meson.build
index c3b160c..275a00e 100644
--- a/meson.build
+++ b/meson.build
@@ -1,4 +1,4 @@
-project('g729-codec-service','c', 'cpp', version: '0.0.2', default_options : ['c_std=c17', 'cpp_std=c++17'])
+project('g729-codec-service','c', 'cpp', version: '0.0.5', default_options : ['c_std=c17', 'cpp_std=c++17'])
 usockets = subproject('usockets')
 rapidjson = subproject('rapidjson')
 uwebsockets = subproject('uwebsockets')
diff --git a/src/G729Encoder.cpp b/src/G729Encoder.cpp
index 8e8bb24..d62836f 100644
--- a/src/G729Encoder.cpp
+++ b/src/G729Encoder.cpp
@@ -23,11 +23,8 @@
 # define FRAME_SIZE 80 // 80 samples per frame
 # define MAX_ENCODER_OUTPUT 10
 //# define
-int encode_g729_frame(bcg729EncoderChannelContextStruct *encoderChannelContext, int16_t * pcm, int frameSize, uint8_t * data) {
-  int numberOfEncodingFrames = frameSize / FRAME_SIZE;
+int encode_g729_frame(bcg729EncoderChannelContextStruct *encoderChannelContext, int16_t * pcm, int numberOfEncodingFrames, uint8_t * data) {
   int totalSize = 0;
-
-  data = new uint8_t[numberOfEncodingFrames * MAX_ENCODER_OUTPUT]();
   // 160 samples
   // 2 frames to decode
   // each frame has 80 samples, each sample is 16 bits (2 bytes)
@@ -53,8 +50,10 @@ G729Encoder::~G729Encoder() {
 
 void G729Encoder::processFrame(AudioFrame& inputFrame, AudioFrame& outputFrame) {
   int frameSize = inputFrame.size / 2;
-  uint8_t * g729_frames;
-  int byteSize = encode_g729_frame(encoderContext, (int16_t *) inputFrame.data, frameSize, g729_frames);
+  int numberOfEncodingFrames = frameSize / FRAME_SIZE;
+  uint8_t * g729_frames = new uint8_t[numberOfEncodingFrames * MAX_ENCODER_OUTPUT]();
+
+  int byteSize = encode_g729_frame(encoderContext, (int16_t *) inputFrame.data, numberOfEncodingFrames, g729_frames);
   outputFrame.data = (int8_t *) g729_frames;
   outputFrame.size = byteSize;
 }
@@ -62,8 +61,10 @@ void G729Encoder::processFrame(AudioFrame& inputFrame, AudioFrame& outputFrame)
 
 void G729Encoder::processFrame(AudioFrame& inputFrame, AudioFrame& outputFrame, VADEvent &vadEvent) {
   int frameSize = inputFrame.size / 2;
-  uint8_t * g729_frames;
-  int byteSize = encode_g729_frame(encoderContext, (int16_t *) inputFrame.data, frameSize, g729_frames);
+  int numberOfEncodingFrames = frameSize / FRAME_SIZE;
+  uint8_t * g729_frames = new uint8_t[numberOfEncodingFrames * MAX_ENCODER_OUTPUT]();
+
+  int byteSize = encode_g729_frame(encoderContext, (int16_t *) inputFrame.data, numberOfEncodingFrames, g729_frames);
   vadEvent = VADEvent::NONE;
   if (byteSize >= 2 && byteSize <= 12) {
     if (voiceActive) {
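
For context on the G729Encoder.cpp change: before this diff, encode_g729_frame assigned a fresh allocation to its uint8_t * data parameter, but a pointer parameter is itself passed by value, so the caller's g729_frames never pointed at that allocation and the buffer leaked. The diff moves the allocation into the callers. The following standalone sketch (illustrative names only, not code from this repository) demonstrates the pitfall and the caller-allocates pattern the diff adopts, assuming a fixed 10-byte output buffer for simplicity.

// Minimal sketch of the pointer-by-value pitfall, not project code.
#include <cstdint>
#include <cstring>

static void fill_by_value(uint8_t * out) {
    out = new uint8_t[10]();   // only the local copy of the pointer is changed
    out[0] = 0x42;             // the caller never observes this write; the allocation leaks
}

static void fill_caller_allocated(uint8_t * out) {
    std::memset(out, 0x42, 10); // writes land in the buffer the caller owns
}

int main() {
    uint8_t * a = nullptr;
    fill_by_value(a);           // a is still nullptr here

    uint8_t * b = new uint8_t[10]();
    fill_caller_allocated(b);   // b[0] == 0x42, mirroring how processFrame now allocates
    delete[] b;                 // caller-allocated, caller-released
    return 0;
}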