在 cygwin 上编译 ffmpeg 并在 Android 上使用的文章。


在 ffmpeg/arm 文件夹下添加的 Android.mk,主要目的是声明 lib 目录下的各个动态库:

# ffmpeg/arm/Android.mk — declares each prebuilt FFmpeg shared library found
# under lib/ as an NDK prebuilt module, so the main project module can link
# them via LOCAL_SHARED_LIBRARIES. LOCAL_EXPORT_C_INCLUDES propagates the
# FFmpeg header path to every module that links against these prebuilts.
LOCAL_PATH:= $(call my-dir)

# libavcodec: decoders/encoders (avcodec_decode_video2 / avcodec_decode_audio4)
include $(CLEAR_VARS)
LOCAL_MODULE:= libavcodec
LOCAL_SRC_FILES:= lib/libavcodec-55.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
include $(PREBUILT_SHARED_LIBRARY)

# libavformat: container demuxing/muxing
include $(CLEAR_VARS)
LOCAL_MODULE:= libavformat
LOCAL_SRC_FILES:= lib/libavformat-55.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
include $(PREBUILT_SHARED_LIBRARY)

# libswscale: pixel-format conversion / scaling (used for YUV -> RGBA)
include $(CLEAR_VARS)
LOCAL_MODULE:= libswscale
LOCAL_SRC_FILES:= lib/libswscale-2.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
include $(PREBUILT_SHARED_LIBRARY)

# libavutil: common utilities (av_gettime, av_samples_alloc, ...)
include $(CLEAR_VARS)
LOCAL_MODULE:= libavutil
LOCAL_SRC_FILES:= lib/libavutil-52.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
include $(PREBUILT_SHARED_LIBRARY)

# libavfilter: filter graph support
include $(CLEAR_VARS)
LOCAL_MODULE:= libavfilter
LOCAL_SRC_FILES:= lib/libavfilter-4.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
include $(PREBUILT_SHARED_LIBRARY)

# libswresample: audio resampling.
# NOTE(review): the module name is misspelled "libwsresample" (w/s swapped)
# while the file is libswresample-0.so. It still links because the project
# Android.mk references the same misspelled module name — if you rename it to
# "libswresample", rename it in BOTH makefiles at once.
include $(CLEAR_VARS)
LOCAL_MODULE:= libwsresample
LOCAL_SRC_FILES:= lib/libswresample-0.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
include $(PREBUILT_SHARED_LIBRARY)


添加 Media.h:

#pragma once
#include <jni.h>
#include <android/native_window_jni.h>
#include "utils/Lock.h"
#include <pthread.h>

// FFmpeg requires __STDC_CONSTANT_MACROS to be defined before its headers are
// included, otherwise they do not compile under a C++ compiler.
#define __STDC_CONSTANT_MACROS
#ifndef INT64_C
// Fallback for toolchains whose <stdint.h> does not provide these macros
// (FFmpeg headers use INT64_C/UINT64_C internally).
#define INT64_C(c) (c ## LL)
#define UINT64_C(c) (c ## ULL)
#endif
// FFmpeg is a C library: wrap its headers so the symbols get C linkage.
extern "C" {
	#include <libavcodec/avcodec.h>
	#include <libavformat/avformat.h>
	#include <libavutil/avutil.h>
	#include <libavutil/dict.h>
	#include <libavutil/frame.h>
	#include <libavutil/mem.h>
	#include <libavutil/pixfmt.h>
	#include <libswscale/swscale.h>
	#include <libavutil/time.h>
	#include <libavutil/opt.h>
	#include <libswresample/swresample.h>
}

// FFmpeg-based media player: decodes video with libavcodec, converts frames
// to RGBA via libswscale and blits them into an ANativeWindow; audio is
// decoded (optionally resampled with libswresample) and handed to the Java
// AudioTrack wrapper (see Audio.h writeAudio()).
class Media
{
	public:

		Media();
		~Media();
		// Bind the output surface (and its pixel size) used for rendering.
		void setSurface(JNIEnv *pEnv, jobject pSurface,int pWidth,int pHeight);
		// Open the media at `path`; returns false on failure.
		bool initPath(const char * path);
		// Prepare decoder/scaler state for the given output size.
		bool initCodec(int width,int height);
		// Native resolution of the opened stream — presumably taken from the
		// codec context; confirm against the .cpp implementation.
		int getResWidth();
		int getResHeight();
		void play();
		void pause();
		void stop();
		bool isPlaying();
		// Decode one complete H264 frame supplied by the caller and render it.
		void decodeAndRenderPic(void *pBuffer,int dwBufsize);
		// Decode one audio packet supplied by the caller and queue its PCM.
		void decodeAudioAndPlay(void *pBuffer,int dwBufsize);
	private:
		// pthread entry-point adapter; forwards to decodeAndRender().
		static void* decodeAndRenderAdpt(void *params);
		// Decode loop body (runs on decodeThread).
		void decodeAndRender();
	private:
		bool bInit;	// guards the render/audio paths until init completed

		ANativeWindow* 		window;	// render target; NULL when no surface bound
		char 				*videoFileName;
		AVFormatContext 	*formatCtx;
		int 				videoStream;	// video stream index in formatCtx
		int               audioStream;	// audio stream index in formatCtx
		AVCodecContext  	*codecCtx;	// video decoder context
		AVCodecContext  	*codecCtxAudio;	// audio decoder context (may stay NULL)
		AVFrame         	*decodedFrame;	// raw decoded frame (native pix fmt)
		AVFrame         	*frameRGBA ;	// RGBA-converted frame for blitting
		jobject				bitmap;
		void*				buffer;	// pixel buffer copied into the window
		struct SwsContext   *sws_ctx;	// pixel-format/scale converter
		struct SwrContext   *swr_ctx;	// audio resampler; NULL = write raw plane 0
		int 				width;	// output width in pixels
		int 				height;	// output height in pixels
		bool               _stop;	// set to request decode-loop exit

		pthread_t decodeThread;

		Mutex mutexSurface;
		Mutex lockWindow;	// guards `window` between rendering and surface changes
};


核心代码如下:


添加Media.cpp

// NOTE(review): the code below is a mangled blog paste — whole statements are
// collapsed onto a few very long lines, and it begins with the TAIL of
// Media::decodeAndRender(), whose function header is not shown here.
// Video-packet branch of the decode loop: decode one frame, then (if a full
// frame was produced) convert and display it.
if(packet.stream_index==videoStream) { // Decode video frame avcodec_decode_video2(codecCtx, decodedFrame, &frameFinished, &packet); // Did we get a video frame?

// sws_scale() converts the frame to RGBA; pts is taken from the frame's
// opaque field or packet.dts (0 as a last resort), scaled to microseconds,
// and the loop sleeps until (baseTime + pts) to pace playback. It then locks
// the ANativeWindow and copies the RGBA buffer in — row by row when the
// window stride exceeds the frame width (to skip the stride padding).
if(frameFinished) { // Convert the image from its native format to RGBA sws_scale ( sws_ctx, (uint8_t const * const *)decodedFrame->data, decodedFrame->linesize, 0, codecCtx->height, frameRGBA->data, frameRGBA->linesize ); if(packet.dts == AV_NOPTS_VALUE && decodedFrame->opaque && *(uint64_t*)decodedFrame->opaque != AV_NOPTS_VALUE) { pts = *(uint64_t *)decodedFrame->opaque; LOGD("pst1: %d",pts); } else if(packet.dts != AV_NOPTS_VALUE) { pts = packet.dts; LOGD("pst2: %d",pts); } else { pts = 0; LOGD("pst3: %d",pts); } //pts = av_q2d(codecCtx->time_base) * 1000000.0 * i * 2; pts *= 1000; //LOGD("debug %d,%d,%f",pts, (long)(av_q2d(codecCtx->time_base) * 1000000.0 * i * 2), av_q2d(codecCtx->time_base)); if(0 == pts || 0 == baseTime) { baseTime = av_gettime() - pts; LOGD("BASETIME: %d",baseTime); }else{ waitTime = (baseTime + pts) - av_gettime(); LOGD("WAITTIME: %d, %d",waitTime,pts); } //waitTime = (av_q2d(codecCtx->time_base) * 1000.0 - 0.0) * 1000; if(waitTime>0) usleep(waitTime); if(!_stop) { synchronized(lockWindow) { if(!_stop && NULL!=window) { // lock the window buffer if (ANativeWindow_lock(pWin, &windowBuffer, NULL) < 0) { LOGE("cannot lock window"); } else { // draw the frame on buffer //LOGD("copy buffer %d:%d:%d", width, height, width*height*RGB_SIZE); //LOGD("window buffer: %d:%d:%d", windowBuffer.width, windowBuffer.height, windowBuffer.stride); //memcpy(windowBuffer.bits, buffer, width * height * RGB_SIZE); if(windowBuffer.width >= windowBuffer.stride){ //LOGE("1=========windowBuffer: %d,%d,%d,%d", windowBuffer.format,windowBuffer.stride,windowBuffer.width,windowBuffer.height); memcpy(windowBuffer.bits, buffer, width * height * RGB_SIZE); }else{ //LOGE("2=========windowBuffer: %d,%d,%d,%d", windowBuffer.format,windowBuffer.stride,windowBuffer.width,windowBuffer.height); //skip stride-width 跳过padding部分内存 for(int i=0;i<height;++i) memcpy(windowBuffer.bits + windowBuffer.stride * i * RGB_SIZE , buffer + width * i * RGB_SIZE , width * RGB_SIZE); } // unlock 
// End of the video branch; audio-packet branch begins mid-line below:
// avcodec_decode_audio4(), then either resample via swr_ctx into a freshly
// allocated sample buffer, or hand the first data plane straight to
// writeAudio(). NOTE(review): as the embedded /* */ comment says, planar
// multi-channel audio without swr_ctx only plays the first channel.
the window buffer and post it to display ANativeWindow_unlockAndPost(pWin); // count number of frames ++i; } } } } } }else if(packet.stream_index==audioStream) { int ret = avcodec_decode_audio4(codecCtxAudio,decodedFrame, &frameFinished, &packet); // LOGD("avcodec_decode_audio4, %d , ret %d" , frameFinished, ret); if(frameFinished) { // LOGD("read audio play"); size_t unpadded_linesize = decodedFrame->nb_samples * av_get_bytes_per_sample((AVSampleFormat)decodedFrame->format); /* Write the raw audio data samples of the first plane. This works * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However, * most audio decoders output planar audio, which uses a separate * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P). * In other words, this code will write only the first audio channel * in these cases. * You should use libswresample or libavfilter to convert the frame * to packed data. */ if(NULL!=swr_ctx) { int dst_linesize = 0; int dst_nb_samples =av_rescale_rnd(decodedFrame->nb_samples, decodedFrame->sample_rate, codecCtxAudio->sample_rate, AV_ROUND_UP); int dst_nb_channels = av_get_channel_layout_nb_channels(codecCtxAudio->channels ==1 ?AV_CH_LAYOUT_MONO:AV_CH_LAYOUT_STEREO); av_samples_alloc_array_and_samples(&dst_data,&dst_linesize,dst_nb_channels,dst_nb_samples,codecCtxAudio->sample_fmt == AV_SAMPLE_FMT_U8?

// Continuation: picks U8 vs S16 as the resample target, writeAudio()s the
// result, frees the packet, and closes decodeAndRender(). Then
// Media::decodeAndRenderPic() begins (mid-line): it wraps a caller-supplied
// complete H264 frame in an AVPacket and decodes it synchronously.
AV_SAMPLE_FMT_U8:AV_SAMPLE_FMT_S16, 0); int ret = audio_swr_resampling_audio(swr_ctx,decodedFrame,dst_data); if(ret>0){ writeAudio(dst_data[0],ret); //fwrite(dst_data[0], 1, ret, stream); } if (dst_data) { av_freep(&dst_data[0]); } av_freep(&dst_data); }else{ writeAudio(decodedFrame->extended_data[0], unpadded_linesize); //fwrite(decodedFrame->extended_data[0], 1, unpadded_linesize, stream); } //fwrite(decodedFrame->extended_data[0], 1, unpadded_linesize, audio_dst_file); LOGD("read audio buffer: %d ,%d", unpadded_linesize, decodedFrame->linesize[0]); }else{ //LOGD("===read audio buffer: %d", packet.size); //writeAudio(packet.data, packet.size); } }else{ LOGD("unkown stream index: %d", packet.stream_index); } // Free the packet that was allocated by av_read_frame av_free_packet(&packet); } //fclose(stream); LOGI("total No. of frames decoded and rendered %d", i); } void Media::decodeAndRenderPic(void *pBuffer,int dwBufsize) { ANativeWindow_Buffer windowBuffer; AVPacket packet; int frameFinished; int lineCnt; ANativeWindow * pWin; pWin=window; ARect rect; rect.left=0; rect.top=0; rect.right = width; rect.bottom = height; memset(&packet,0x00,sizeof(AVPacket)); packet.data = (uint8_t*)pBuffer;//这里填入一个指向完整H264数据帧的指针 packet.size = dwBufsize;//这个填入H264数据帧的大小 // Decode video frame avcodec_decode_video2(codecCtx, decodedFrame, &frameFinished, &packet); // Did we get a video frame?

// If a frame was produced and a window is bound: sws_scale to RGBA, lock the
// window clipped to `rect`, copy pixels (stride-aware, same as above),
// unlock/post, and free the packet. Then Media::decodeAudioAndPlay() begins
// (mid-line): bails out early when codecCtxAudio was never initialized.
//LOGD("111111111111111111111111"); if(frameFinished && NULL!=window && bInit) { // Convert the image from its native format to RGBA sws_scale ( sws_ctx, (uint8_t const * const *)decodedFrame->data, decodedFrame->linesize, 0, codecCtx->height, frameRGBA->data, frameRGBA->linesize ); //LOGD("22222222222222222222222222222"); synchronized(lockWindow) { if(NULL!=window) { // lock the window buffer if (ANativeWindow_lock(pWin, &windowBuffer, &rect) < 0) { LOGE("cannot lock window"); } else { //LOGD("333333333333333333333333333"); // draw the frame on buffer LOGD("copy buffer %d:%d:%d lineSize:%d", width, height, width*height*RGB_SIZE, frameRGBA->linesize[0]); LOGD("RECT : %d,%d,%d,%d",rect.left,rect.top,rect.right,rect.bottom); //LOGD("window buffer: %d:%d:%d", windowBuffer.width,windowBuffer.height, windowBuffer.stride); if(windowBuffer.width >= windowBuffer.stride){ //LOGE("1=========windowBuffer: %d,%d,%d,%d", windowBuffer.format,windowBuffer.stride,windowBuffer.width,windowBuffer.height); memcpy(windowBuffer.bits, buffer, width * height * RGB_SIZE); }else{ //LOGE("2=========windowBuffer: %d,%d,%d,%d", windowBuffer.format,windowBuffer.stride,windowBuffer.width,windowBuffer.height); //skip stride-width 跳过padding部分内存 for(int i=0;i<height;++i) memcpy(windowBuffer.bits + windowBuffer.stride * i * RGB_SIZE , buffer + width * i * RGB_SIZE , width * RGB_SIZE); } //LOGD("666666666666666666666666666"); // unlock the window buffer and post it to display ANativeWindow_unlockAndPost(pWin); // count number of frames //SaveFrame(pEnv, bitmap, codecCtx->width, codecCtx->height, i); //stop = 1; } } } } //LOGD("44444444444444444444444"); // Free the packet that was allocated by av_read_frame av_free_packet(&packet); //LOGD("5555555555555555555555555"); } void Media::decodeAudioAndPlay(void *pBuffer,int dwBufsize) { AVPacket packet; int frameFinished; LOGD("decodeAudioAndPlay start"); if(NULL == codecCtxAudio) { LOGD("codecCtxAudio not init!"); return; } 
// Wrap the caller's raw buffer in an AVPacket and decode one audio frame.
memset(&packet,0x00,sizeof(AVPacket)); packet.data = (uint8_t*)pBuffer;//这里填入一个指向完整H264数据帧的指针 packet.size = dwBufsize;//这个填入H264数据帧的大小 // Decode audio frame int ret = avcodec_decode_audio4(codecCtxAudio,decodedFrame, &frameFinished, &packet); LOGD("avcodec_decode_audio4, %d , ret %d" , frameFinished, ret); // Did we get a audio frame?

// On success, write the first plane's samples to the audio queue; always
// free the packet. NOTE(review): like the loop above, planar multi-channel
// audio would only play channel 0 here.
if(frameFinished && bInit) { size_t unpadded_linesize = decodedFrame->nb_samples * av_get_bytes_per_sample((AVSampleFormat)decodedFrame->format); writeAudio(decodedFrame->extended_data[0], unpadded_linesize); LOGD("writeAudio"); }else{ LOGD("writeAudio fail!"); } // Free the packet that was allocated by av_read_frame av_free_packet(&packet); LOGD("decodeAudioAndPlay end"); }




Audio.h 音频做成了单例模式,没有做太多封装。播放使用 Java 的 AudioTrack:如果用 native 层的 AudioTrack,不同版本的 Android 上 so 文件不一样,所以不考虑了。

#pragma once

// Initialize the global audio playback singleton (backed by java
// android.media.AudioTrack — see Audio.cpp).
// mhz: sample rate in Hz (default 44100); bMono/b16Bit: presumably select
// mono-vs-stereo channel config and 16-vs-8-bit PCM encoding — confirm
// against the AudioTrack constructor call in Audio.cpp.
void initAudio(int mhz=44100,bool bMono=false,bool b16Bit=true);
// Copy `size` bytes of raw PCM from `buffer` and queue them for asynchronous
// playback on the audio thread.
void writeAudio(void * buffer,int size);
// Signal the playback thread to stop consuming queued buffers.
void releaseAudio();

Audio.cpp

// NOTE(review): mangled blog paste — this single line is the TAIL of
// initAudio() (the java AudioTrack constructor call is cut off mid-argument:
// channel config 0x4|0x8, ENCODING_PCM_16BIT vs 8BIT, bufferSizeInBytes,
// MODE_STREAM), followed by three complete functions:
//  * audioThread(): attaches the thread to the JVM, then loops forever
//    popping queued buffers and pushing them to AudioTrack.write() in
//    BUFFER_SIZE chunks (writes are skipped while _stop is set; the
//    DetachCurrentThread() after the while(true) is unreachable);
//  * writeAudio(): copies the caller's PCM into a ref-counted buffer and
//    enqueues it for audioThread;
//  * releaseAudio(): only sets _stop — the thread and the JNI global refs
//    are never torn down.
// NOTE(review): `static pthread_t thread=NULL;` compares a pthread_t against
// NULL, which is not portable — pthread_t is an opaque type.
2:0x4|0x8,//0x4|0x8,//2, /*CHANNEL_CONFIGURATION_MONO*/ b16Bit?2:3, /*ENCODING_PCM_16BIT*/ buffer_size, /*bufferSizeInBytes*/ 1 /*AudioTrack.MODE_STREAM*/ ); audio_track = (jobject)pEnv->NewGlobalRef(audio_track); //setvolume LOGD("setStereoVolume 1"); jmethodID setStereoVolume =pEnv->GetMethodID(audio_track_cls,"setStereoVolume","(FF)I"); pEnv->CallIntMethod(audio_track,setStereoVolume,1.0,1.0); LOGD("setStereoVolume 2"); //play jmethodID method_play =pEnv->GetMethodID(audio_track_cls, "play", "()V"); pEnv->CallVoidMethod(audio_track, method_play); //write method_write =pEnv->GetMethodID(audio_track_cls,"write","([BII)I"); //method_write = (jmethodID)pEnv->NewGlobalRef(method_write); LOGI("initAudio OK, BufferSize/4:%d",buffer_size/4 ); static pthread_t thread=NULL; if(NULL==thread) pthread_create(&thread, NULL, audioThread, NULL); init = true; } void* audioThread(void *params) { LOGW("create thread : %d Audio.cpp audioThread", syscall(SYS_gettid)); AttachCurrentThread(); JNIEnv * env = getEnv(); while(true) { SE e = pop(); if(_stop) continue; int size = e->size; int wirteSize = 0; jbyte * buf= e->buf.get(); while(size > BUFFER_SIZE) { // LOGD("writeAudio , BufferSize/4:%d",BUFFER_SIZE ); env->SetByteArrayRegion(buffer, 0,BUFFER_SIZE, buf + wirteSize); //LOGD("writeAudio , ==========" ); env->CallVoidMethod(audio_track,method_write,buffer,0,BUFFER_SIZE); wirteSize += BUFFER_SIZE; size -= BUFFER_SIZE; } if(size>0) { //LOGD("writeAudio , size:%d",size ); env->SetByteArrayRegion(buffer, 0,size, buf + wirteSize); env->CallVoidMethod(audio_track,method_write,buffer,0,size); } //LOGD("writeAudio , OK! size:%d",e->size ); } DetachCurrentThread(); return NULL; } void writeAudio(void * buf,int size) { sharedptr<jbyte> b(new jbyte[size]); memcpy(b.get(),buf,size); ElementBuf *eb =new ElementBuf(); eb->buf = b; eb->size = size; SE e(eb); push(e); } void releaseAudio() { _stop = true; }



其他相关代码:

// Attach the calling native thread to the JVM so it can obtain a JNIEnv.
// Returns true only when a NEW attachment was made successfully; returns
// false both on attach failure and when the thread was already attached,
// so callers can pair a `true` result with a later DetachCurrentThread().
bool AttachCurrentThread()
{
	LOGI("AttachCurrentThread ing");
	// getEnv() returns NULL for a thread not yet attached to the JVM.
	JNIEnv *env = getEnv();
	if (NULL == env) {
		// JavaVM::AttachCurrentThread returns JNI_OK (0) on success and a
		// negative JNI error code on failure.
		int ret = g_jvm->AttachCurrentThread(&env, NULL);
		LOGI("AttachCurrentThread ok");
		return ret >= 0;
	}
	LOGW("AttachCurrentThread fail, thread is attached");
	return false;
}
void DetachCurrentThread()
{
	LOGI("DetachCurrentThread ing");
	if(NULL!=getEnv())
		g_jvm->DetachCurrentThread();
	LOGI("DetachCurrentThread ok");
}
// Return the JNIEnv for the calling thread, or NULL when the thread is not
// attached to the JVM (JavaVM::GetEnv only succeeds for attached threads).
// NOTE(review): the original paste contained stray HTML "<span>" markup from
// the blog engine inside this function's indentation; removed here.
JNIEnv * getEnv()
{
	JNIEnv* env;
	if (g_jvm->GetEnv((void **)&env, JNI_VERSION_1_6) != JNI_OK) {
		return NULL;
	}
	return env;
}


项目的 Android.mk。我这里包含了我使用的 libspeex 库,没用到的可以去掉:

# jni/Android.mk for the main native module "cu".
LOCAL_PATH := $(call my-dir)

include $(CLEAR_VARS)

LOCAL_MODULE    := cu
#LOCAL_SRC_FILES := cu.cpp

# Collect every .cpp in this directory; the substitution strips the absolute
# $(LOCAL_PATH)/ prefix because LOCAL_SRC_FILES wants relative paths.
FILE_LIST := $(wildcard $(LOCAL_PATH)/*.cpp) 
LOCAL_SRC_FILES += $(FILE_LIST:$(LOCAL_PATH)/%=%) 

# Same for .c files in this directory.
FILE_LIST := $(wildcard $(LOCAL_PATH)/*.c) 
LOCAL_SRC_FILES += $(FILE_LIST:$(LOCAL_PATH)/%=%)

# And .cpp files one directory level down (e.g. utils/).
# NOTE(review): .c files in subdirectories are NOT picked up — add a
# $(LOCAL_PATH)/*/*.c wildcard if any exist.
FILE_LIST := $(wildcard $(LOCAL_PATH)/*/*.cpp) 
LOCAL_SRC_FILES += $(FILE_LIST:$(LOCAL_PATH)/%=%)

LOCAL_LDLIBS := -llog -ljnigraphics -lz -landroid
# "libwsresample" intentionally matches the (misspelled) module name declared
# in ffmpeg/arm/Android.mk — the two names must stay in sync.
LOCAL_SHARED_LIBRARIES := libavformat libavcodec libswscale libavutil libwsresample libspeex

include $(BUILD_SHARED_LIBRARY)

# Make the prebuilt FFmpeg and speex modules importable, then import them.
$(call import-add-path,$(LOCAL_PATH))
$(call import-add-path,$(LOCAL_PATH)/ffmpeg/arm/include)
$(call import-module, ffmpeg/arm)
$(call import-module, speex)
include $(all-subdir-makefiles)

Application.mk:

# Build only the classic ARM ABI; uncomment the next line for armeabi-v7a.
APP_ABI := armeabi
#APP_ABI := armeabi-v7a
# android-9: minimum platform used here — NOTE(review): the native window
# headers (<android/native_window_jni.h>) require at least this level.
APP_PLATFORM := android-9
APP_STL := stlport_static
# Enable C++ exceptions (off by default in the NDK).
APP_CPPFLAGS += -fexceptions
# Downgrade format-security diagnostics from error to warning.
APP_CFLAGS += -Wno-error=format-security


暂时先贴这么多代码出来啦!

音视频同步只做了最简单的、根据视频 pts 的同步;我发现 pts 的表现和网上说的不太一样。
音频使用的是:java 的AudioTrack




版权声明:本文为博客原创文章,未经同意,不得转载。

相关文章: