Diffstat (limited to 'multimedia/libxine/files/ffmpeg-vaapi_xine-lib-1.1.19-initerrorhack.diff')
-rw-r--r-- | multimedia/libxine/files/ffmpeg-vaapi_xine-lib-1.1.19-initerrorhack.diff | 851
1 file changed, 0 insertions, 851 deletions
diff --git a/multimedia/libxine/files/ffmpeg-vaapi_xine-lib-1.1.19-initerrorhack.diff b/multimedia/libxine/files/ffmpeg-vaapi_xine-lib-1.1.19-initerrorhack.diff deleted file mode 100644 index a48468a47de9..000000000000 --- a/multimedia/libxine/files/ffmpeg-vaapi_xine-lib-1.1.19-initerrorhack.diff +++ /dev/null @@ -1,851 +0,0 @@ -diff -Naur xine-lib-1.1.19.orig/src/combined/ffmpeg/ff_video_decoder.c xine-lib-1.1.19/src/combined/ffmpeg/ff_video_decoder.c ---- xine-lib-1.1.19.orig/src/combined/ffmpeg/ff_video_decoder.c -+++ xine-lib-1.1.19/src/combined/ffmpeg/ff_video_decoder.c -@@ -51,6 +51,21 @@ - # include <libpostproc/postprocess.h> - #endif - -+#include <va/va_x11.h> -+#include <libavcodec/vaapi.h> -+//#include <X11/Xutil.h> -+ -+typedef struct ff_va_surfaces_s ff_va_surfaces_t; -+ -+struct ff_va_surfaces_s -+{ -+ int count; -+ VASurfaceID **free; -+ VASurfaceID **used; -+}; -+ -+#define NUM_SURFACES 21 -+ - #define VIDEOBUFSIZE (128*1024) - #define SLICE_BUFFER_SIZE (1194*1024) - -@@ -89,6 +104,7 @@ typedef struct ff_video_class_s { - int thread_count; - int8_t skip_loop_filter_enum; - int8_t choose_speed_over_accuracy; -+ int enable_vaapi; - - xine_t *xine; - } ff_video_class_t; -@@ -127,6 +143,7 @@ struct ff_video_decoder_s { - int slice_offset_size; - - AVFrame *av_frame; -+ //AVPacket av_pkt; - AVCodecContext *context; - AVCodec *codec; - -@@ -154,8 +171,31 @@ struct ff_video_decoder_s { - #ifdef LOG - enum PixelFormat debug_fmt; - #endif -+ -+}; -+ -+typedef struct ff_vaapi_context_s ff_vaapi_context_t; -+ -+struct ff_vaapi_context_s { -+ VAImage va_image; -+ Display *display; -+ VADisplay *va_display; -+ VAImageFormat *va_ifmts; -+ uint8_t *va_image_data; -+ VAContextID va_context_id; -+ VAConfigID va_config_id; -+ VASurfaceID *va_surface_id; -+ struct ff_va_surfaces_s va_surfaces; -+ int width; -+ int height; -+ int va_profile; -+ struct vaapi_context *vaapi_context; -+ int use_vaapi; - }; - -+static struct ff_vaapi_context_s *va_context; -+ -+static VAStatus init_vaapi(struct ff_vaapi_context_s *va_context, int va_profile, int width, int height); - - static void set_stream_info(ff_video_decoder_t *this) { - _x_stream_info_set(this->stream, XINE_STREAM_INFO_VIDEO_WIDTH, this->bih.biWidth); -@@ -163,7 +203,70 @@ static void set_stream_info(ff_video_dec - _x_stream_info_set(this->stream, XINE_STREAM_INFO_VIDEO_RATIO, this->aspect_ratio * 10000); - } - -+/* -+ * XXX Error on init_vaapi() didn't cause fall back to standard rendering, -+ * so do a crude test on plugin open. 
-+ */ -+#define INIT_ERROR_HACK -+ - #ifdef ENABLE_DIRECT_RENDERING -+ -+static void draw_slice(struct AVCodecContext *context, const AVFrame *src, int offset[4], int y, int type, int height) { -+ uint8_t *source[4]= {src->data[0] + offset[0], src->data[1] + offset[1], src->data[2] + offset[2], src->data[3] + offset[3]}; -+ int strides[4] = {src->linesize[0], src->linesize[1], src->linesize[2]}; -+ -+ if (height < 0) { -+ int i; -+ height = -height; -+ y -= height; -+ for (i = 0; i < 4; i++) { -+ strides[i] = -strides[i]; -+ source[i] -= strides[i]; -+ } -+ } -+} -+ -+static VAProfile* find_profile(VAProfile* p, int np, int codec) -+{ -+ int i; -+ -+ for(i = 0; i < np; i++) -+ { -+ if(p[i] == codec) -+ return &p[i]; -+ } -+ -+ return NULL; -+} -+ -+int get_profile(int codec_id) { -+ int profile; -+ -+ switch (codec_id) { -+ case CODEC_ID_MPEG2VIDEO: -+ profile = VAProfileMPEG2Main; -+ break; -+ case CODEC_ID_MPEG4: -+ case CODEC_ID_H263: -+ profile = VAProfileMPEG4AdvancedSimple; -+ break; -+ case CODEC_ID_H264: -+ profile = VAProfileH264High; -+ break; -+ case CODEC_ID_WMV3: -+ profile = VAProfileVC1Main; -+ break; -+ case CODEC_ID_VC1: -+ profile = VAProfileVC1Advanced; -+ break; -+ default: -+ profile = -1; -+ break; -+ } -+ -+ return profile; -+} -+ - /* called from ffmpeg to do direct rendering method 1 */ - static int get_buffer(AVCodecContext *context, AVFrame *av_frame){ - ff_video_decoder_t *this = (ff_video_decoder_t *)context->opaque; -@@ -185,6 +288,43 @@ static int get_buffer(AVCodecContext *co - - avcodec_align_dimensions(context, &width, &height); - -+ if( va_context->use_vaapi ) { -+ int i = 0; -+ struct ff_va_surfaces_s *va_surfaces = &va_context->va_surfaces; -+ -+ for(i = 0; i < va_surfaces->count; i++) -+ { -+ //printf("srfc #%d %p\n",i,va_srfcs->srfc_free[i]); -+ if(va_surfaces->free[i]) -+ { -+ va_surfaces->used[i] = va_surfaces->free[i]; -+ va_surfaces->free[i] = NULL; -+ break; -+ } -+ } -+ if(i == va_surfaces->count) -+ { -+ printf("ERROR get surface\n"); -+ return -1; -+ } -+ -+ -+ av_frame->type = FF_BUFFER_TYPE_USER; -+ av_frame->age = 256*256*256*64; // FIXME FIXME from ffmpeg -+ av_frame->data[0] = (void*)va_surfaces->used[i]; -+ av_frame->data[1] = NULL; -+ av_frame->data[2] = NULL; -+ av_frame->data[3] = (void*)(size_t)*va_surfaces->used[i]; -+ av_frame->linesize[0] = 0; -+ av_frame->linesize[1] = 0; -+ av_frame->linesize[2] = 0; -+ av_frame->linesize[3] = 0; -+ -+ this->is_direct_rendering_disabled = 1; -+ -+ return 0; -+ } -+ - if( this->context->pix_fmt != PIX_FMT_YUV420P && this->context->pix_fmt != PIX_FMT_YUVJ420P ) { - if (!this->is_direct_rendering_disabled) { - xprintf(this->stream->xine, XINE_VERBOSITY_LOG, -@@ -254,10 +394,33 @@ static int get_buffer(AVCodecContext *co - static void release_buffer(struct AVCodecContext *context, AVFrame *av_frame){ - ff_video_decoder_t *this = (ff_video_decoder_t *)context->opaque; - -+ if( va_context->use_vaapi ) { -+ struct ff_va_surfaces_s *va_surfaces = &va_context->va_surfaces; -+ VASurfaceID va_surface_id = (VASurfaceID)(size_t)av_frame->data[3]; -+ int i = 0; -+ -+ for(i = 0; i < va_surfaces->count; i++) -+ { -+ //printf("srfc #%d %p 0x%08X\n",i,va_srfcs->srfc_used[i], va_srfcs->srfc_used[i] ? 
*va_srfcs->srfc_used[i] : -1); -+ if(va_surfaces->used[i] && (*va_surfaces->used[i] == va_surface_id)) -+ { -+ va_surfaces->free[i] = va_surfaces->used[i]; -+ va_surfaces->used[i] = NULL; -+ break; -+ } -+ } -+ -+ av_frame->data[0] = NULL; -+ av_frame->data[1] = NULL; -+ av_frame->data[2] = NULL; -+ av_frame->data[3] = NULL; -+ return; -+ } -+ - if (av_frame->type == FF_BUFFER_TYPE_USER) { - if ( av_frame->opaque ) { -- vo_frame_t *img = (vo_frame_t *)av_frame->opaque; -- -+ vo_frame_t *img = (vo_frame_t *)av_frame->opaque; -+ - img->free(img); - } - -@@ -299,13 +462,272 @@ static const int skip_loop_filter_enum_v - AVDISCARD_ALL - }; - -+static const char *VAProfile2string(VAProfile profile) -+{ -+ switch(profile) { -+#define PROFILE(profile) \ -+ case VAProfile##profile: return "VAProfile" #profile -+ PROFILE(MPEG2Simple); -+ PROFILE(MPEG2Main); -+ PROFILE(MPEG4Simple); -+ PROFILE(MPEG4AdvancedSimple); -+ PROFILE(MPEG4Main); -+ PROFILE(H264Baseline); -+ PROFILE(H264Main); -+ PROFILE(H264High); -+ PROFILE(VC1Simple); -+ PROFILE(VC1Main); -+ PROFILE(VC1Advanced); -+#undef PROFILE -+ default: break; -+ } -+ return "<unknown>"; -+} -+ -+static const char *VAEntrypoint2string(VAEntrypoint entrypoint) -+{ -+ switch(entrypoint) -+ { -+#define ENTRYPOINT(entrypoint) \ -+ case VAEntrypoint##entrypoint: return "VAEntrypoint" #entrypoint -+ ENTRYPOINT(VLD); -+ ENTRYPOINT(IZZ); -+ ENTRYPOINT(IDCT); -+ ENTRYPOINT(MoComp); -+ ENTRYPOINT(Deblocking); -+#undef ENTRYPOINT -+ default: break; -+ } -+ return "<unknown>"; -+} -+ -+static enum PixelFormat get_format(struct AVCodecContext *context, const enum PixelFormat *fmt) -+{ -+ int i, profile; -+ -+ for (i = 0; fmt[i] != PIX_FMT_NONE; i++) { -+ if (fmt[i] != PIX_FMT_VAAPI_VLD) -+ continue; -+ -+ profile = get_profile(context->codec_id); -+ -+ if (profile >= 0) { -+ VAStatus status; -+ -+ status = init_vaapi(va_context, profile, context->width, context->height); -+ -+ if( status == VA_STATUS_SUCCESS ) { -+ -+ //context->draw_horiz_band = draw_slice; -+ context->draw_horiz_band = NULL; -+ context->slice_flags = SLICE_FLAG_CODED_ORDER | SLICE_FLAG_ALLOW_FIELD; -+ context->dsp_mask = AV_CPU_FLAG_FORCE | AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMX2 | AV_CPU_FLAG_SSE; -+ -+ va_context->vaapi_context->config_id = va_context->va_config_id; -+ va_context->vaapi_context->context_id = va_context->va_context_id; -+ context->hwaccel_context = va_context->vaapi_context; -+ printf("init vaapi successfully\n"); -+ va_context->use_vaapi = 1; -+ return fmt[i]; -+ } else { -+ va_context->use_vaapi = 0; -+ printf("vaapi init error\n"); -+ } -+ } else { -+ va_context->use_vaapi = 0; -+ } -+ } -+ return PIX_FMT_NONE; -+} -+ -+static void close_vaapi(ff_vaapi_context_t *va_context) { -+ if (va_context->use_vaapi == 0); -+ return; -+ -+ if(va_context->va_context_id) { -+ if(va_context->va_image.image_id != VA_INVALID_ID ) { -+ vaDestroyImage(va_context->va_display, va_context->va_image.image_id ); -+ } -+ if(va_context->va_context_id != VA_INVALID_ID) { -+ vaDestroyContext(va_context->va_display, va_context->va_context_id); -+ } -+ -+ int i = 0; -+ for( i = 0; i < va_context->va_surfaces.count; i++ ) { -+ if( va_context->va_surface_id[i] != VA_INVALID_SURFACE) { -+ vaDestroySurfaces(va_context->va_display, &va_context->va_surface_id[i], 1); -+ } -+ } -+ -+ if(va_context->va_config_id) -+ vaDestroyConfig(va_context->va_display, va_context->va_config_id); -+ -+ vaTerminate(va_context->va_display); -+ XCloseDisplay(va_context->display); -+ -+ free(va_context->va_surfaces.free); -+ 
free(va_context->va_surfaces.used); -+ free(va_context->va_surface_id); -+ free(va_context->vaapi_context); -+ free(va_context); -+ -+ va_context = NULL; -+ } -+} -+ -+static VAStatus create_vaapi_image(struct ff_vaapi_context_s *va_context) { -+ int i; -+ int fmt_count = vaMaxNumImageFormats( va_context->va_display ); -+ VAImageFormat *va_p_fmt = calloc( fmt_count, sizeof(*va_p_fmt) ); -+ VAImageFormat va_fmt; -+ -+ if( vaQueryImageFormats( va_context->va_display , va_p_fmt, &fmt_count ) ) { -+ free(va_p_fmt); -+ goto error; -+ } -+ -+ for( i = 0; i < fmt_count; i++ ) { -+ if ( va_p_fmt[i].fourcc == VA_FOURCC( 'Y', 'V', '1', '2' ) || -+ va_p_fmt[i].fourcc == VA_FOURCC( 'I', '4', '2', '0' ) || -+ va_p_fmt[i].fourcc == VA_FOURCC( 'N', 'V', '1', '2' ) ) { -+ -+ if( vaCreateImage( va_context->va_display, &va_p_fmt[i], va_context->width, va_context->height, &va_context->va_image) ) { -+ va_context->va_image.image_id = VA_INVALID_ID; -+ continue; -+ } -+ if( vaGetImage(va_context->va_display, va_context->va_surface_id[0], 0, 0, -+ va_context->width, va_context->height, va_context->va_image.image_id) ) { -+ vaDestroyImage( va_context->va_display, va_context->va_image.image_id ); -+ va_context->va_image.image_id = VA_INVALID_ID; -+ continue; -+ } -+ printf("found valid image format\n"); -+ va_fmt = va_p_fmt[i]; -+ break; -+ } -+ } -+ free(va_p_fmt); -+ -+ if(va_context->va_image.image_id == VA_INVALID_ID) -+ goto error; -+ -+ return VA_STATUS_SUCCESS; -+ -+error: -+ return VA_STATUS_ERROR_UNKNOWN; -+} -+ -+static VAStatus init_vaapi(struct ff_vaapi_context_s *va_context, int va_profile, int width, int height) { -+ size_t i; -+ VAStatus status = VA_STATUS_ERROR_UNKNOWN; -+ int surface_count = NUM_SURFACES; -+ VAConfigAttrib va_attrib; -+ int maj, min; -+ VAProfile *va_profiles = NULL; -+ VAProfile *va_profile_search = NULL; -+ int number_profiles; -+ -+ va_context->width = width; -+ va_context->height = height; -+ va_context->va_profile = va_profile; -+ -+ va_context->va_surfaces.free = NULL; -+ va_context->va_surfaces.used = NULL; -+ -+ va_context->va_display = NULL; -+ va_context->va_config_id = VA_INVALID_ID; -+ va_context->va_context_id = VA_INVALID_ID; -+ va_context->va_image.image_id = VA_INVALID_ID; -+ -+ va_context->display = XOpenDisplay(NULL); -+ if(!va_context->display) -+ goto error; -+ -+ va_context->va_display = vaGetDisplay(va_context->display); -+ va_context->vaapi_context->display = va_context->va_display; -+ -+ if(!va_context->va_display) -+ goto error; -+ -+ if(vaInitialize(va_context->va_display, &maj, &min)) -+ goto error; -+ -+ printf("libva: %d.%d\n", maj, min); -+ -+ printf("AvCodecContext w %d h %d\n", va_context->width, va_context->height); -+ -+ memset( &va_attrib, 0, sizeof(va_attrib) ); -+ va_attrib.type = VAConfigAttribRTFormat; -+ -+ va_profiles = malloc(vaMaxNumProfiles(va_context->va_display) * sizeof(VAProfile)); -+ -+ if(vaQueryConfigProfiles(va_context->va_display, va_profiles, &number_profiles)) { -+ free(va_profiles); -+ goto error; -+ } -+ -+ free(va_profiles); -+ -+ -+ va_profile_search = find_profile(va_profiles, number_profiles, va_profile); -+ -+ if(!va_profile_search) -+ goto error; -+ -+ printf("Profile: %d (%s) Entrypoint %d (%s)\n", va_context->va_profile, VAProfile2string(va_context->va_profile), VAEntrypointVLD, VAEntrypoint2string(VAEntrypointVLD)); -+ -+ if( vaGetConfigAttributes(va_context->va_display, va_context->va_profile, VAEntrypointVLD, &va_attrib, 1) ) -+ goto error; -+ -+ if( (va_attrib.value & VA_RT_FORMAT_YUV420) == 0 ) -+ goto error; 
-+ -+ if( vaCreateConfig(va_context->va_display, va_context->va_profile, VAEntrypointVLD, &va_attrib, 1, &va_context->va_config_id) ) { -+ va_context->va_config_id = VA_INVALID_ID; -+ goto error; -+ } -+ -+ if(va_context->va_surface_id == NULL) { -+ va_context->va_surface_id = malloc(sizeof(VASurfaceID) * surface_count); -+ va_context->va_surfaces.free = malloc(sizeof(VASurfaceID*) * surface_count); -+ va_context->va_surfaces.used = malloc(sizeof(VASurfaceID*) * surface_count); -+ } -+ -+ if( vaCreateSurfaces(va_context->va_display, va_context->width, va_context->height, VA_RT_FORMAT_YUV420, surface_count, va_context->va_surface_id) ) -+ goto error; -+ -+ for(i = 0; i < surface_count; i++) { -+ va_context->va_surfaces.free[i] = &va_context->va_surface_id[i]; -+ va_context->va_surfaces.used[i] = NULL; -+ } -+ -+ va_context->va_surfaces.count = surface_count; -+ -+ if(vaCreateContext(va_context->va_display, va_context->va_config_id, va_context->width, va_context->height, -+ VA_PROGRESSIVE, va_context->va_surface_id, surface_count, &va_context->va_context_id) ) { -+ va_context->va_context_id = VA_INVALID_ID; -+ goto error; -+ } -+ -+ status = create_vaapi_image(va_context); -+ if(status != VA_STATUS_SUCCESS) -+ goto error; -+ -+ va_context->use_vaapi = 1; -+ return VA_STATUS_SUCCESS; -+ -+error: -+ va_context->use_vaapi = 0; -+ return VA_STATUS_ERROR_UNKNOWN; -+} -+ - static void init_video_codec (ff_video_decoder_t *this, unsigned int codec_type) { - size_t i; - - /* find the decoder */ - this->codec = NULL; - -- for(i = 0; i < sizeof(ff_video_lookup)/sizeof(ff_codec_t); i++) -+ for(i = 0; i < sizeof(ff_video_lookup)/sizeof(ff_codec_t); i++) { - if(ff_video_lookup[i].type == codec_type) { - pthread_mutex_lock(&ffmpeg_lock); - this->codec = avcodec_find_decoder(ff_video_lookup[i].id); -@@ -314,6 +736,8 @@ static void init_video_codec (ff_video_d - ff_video_lookup[i].name); - break; - } -+ } -+ - - if (!this->codec) { - xprintf (this->stream->xine, XINE_VERBOSITY_LOG, -@@ -327,9 +751,11 @@ static void init_video_codec (ff_video_d - - this->context->width = this->bih.biWidth; - this->context->height = this->bih.biHeight; -+ - this->context->stream_codec_tag = this->context->codec_tag = - _x_stream_info_get(this->stream, XINE_STREAM_INFO_VIDEO_FOURCC); - -+ //av_init_packet(&this->av_pkt); - - /* Some codecs (eg rv10) copy flags in init so it's necessary to set - * this flag here in case we are going to use direct rendering */ -@@ -339,7 +765,6 @@ static void init_video_codec (ff_video_d - - if (this->class->choose_speed_over_accuracy) - this->context->flags2 |= CODEC_FLAG2_FAST; -- - pthread_mutex_lock(&ffmpeg_lock); - if (avcodec_open (this->context, this->codec) < 0) { - pthread_mutex_unlock(&ffmpeg_lock); -@@ -353,7 +778,6 @@ static void init_video_codec (ff_video_d - - if (this->codec->id == CODEC_ID_VC1 && - (!this->bih.biWidth || !this->bih.biHeight)) { -- /* VC1 codec must be re-opened with correct width and height. 
*/ - avcodec_close(this->context); - - if (avcodec_open (this->context, this->codec) < 0) { -@@ -406,6 +830,13 @@ static void init_video_codec (ff_video_d - xprintf(this->stream->xine, XINE_VERBOSITY_LOG, - _("ffmpeg_video_dec: direct rendering enabled\n")); - } -+ if( this->class->enable_vaapi && !this->is_mpeg12) { -+ this->context->get_buffer = get_buffer; -+ this->context->reget_buffer = get_buffer; -+ this->context->release_buffer = release_buffer; -+ this->context->get_format = get_format; -+ this->context->thread_count = 1; -+ } - #endif - - /* flag for interlaced streams */ -@@ -429,7 +860,13 @@ static void init_video_codec (ff_video_d - this->frame_flags |= VO_INTERLACED_FLAG; - break; - } -+} -+ - -+static void enable_vaapi(void *user_data, xine_cfg_entry_t *entry) { -+ ff_video_class_t *class = (ff_video_class_t *) user_data; -+ -+ class->enable_vaapi = entry->num_value; - } - - static void choose_speed_over_accuracy_cb(void *user_data, xine_cfg_entry_t *entry) { -@@ -558,6 +995,36 @@ static int ff_handle_mpeg_sequence(ff_vi - return 1; - } - -+/* -+*/ -+ -+static int nv12_to_yv12(ff_video_decoder_t *this, uint8_t *data, int len, vo_frame_t *img) { -+ unsigned int Y_size = img->width * this->bih.biHeight; -+ unsigned int UV_size = img->width * this->bih.biHeight / 4; -+ unsigned int idx; -+ unsigned char *dst_Y = img->base[0]; -+ unsigned char *dst_U = img->base[1]; -+ unsigned char *dst_V = img->base[2]; -+ unsigned char *src = data + Y_size; -+ -+ // sanity check raw stream -+ if ( (len != (Y_size + (UV_size<<1))) ) { -+ printf("hmblck: Image size inconsistent with data size.\n"); -+ return 0; -+ } -+ -+ // luma data is easy, just copy it -+ xine_fast_memcpy(dst_Y, data, Y_size); -+ -+ // chroma data is interlaced UVUV... so deinterlace it -+ for(idx=0; idx<UV_size; idx++ ) { -+ *(dst_U + idx) = *(src + (idx<<1) + 0); -+ *(dst_V + idx) = *(src + (idx<<1) + 1); -+ } -+ -+ return 1; -+} -+ - static void ff_convert_frame(ff_video_decoder_t *this, vo_frame_t *img) { - int y; - uint8_t *dy, *du, *dv, *sy, *su, *sv; -@@ -567,6 +1034,49 @@ static void ff_convert_frame(ff_video_de - printf ("frame format == %08x\n", this->debug_fmt = this->context->pix_fmt); - #endif - -+ if(this->context->pix_fmt == PIX_FMT_VAAPI_VLD && va_context->va_image.image_id != VA_INVALID_ID) { -+ void *p_base; -+ const uint32_t fourcc = va_context->va_image.format.fourcc; -+ VAStatus status; -+ -+ if( !vaGetImage( va_context->va_display, (VASurfaceID)(size_t)this->av_frame->data[3], -+ 0, 0, img->width, this->bih.biHeight, va_context->va_image.image_id) ) { -+ -+ status = vaMapBuffer( va_context->va_display, va_context->va_image.buf, &p_base ) ; -+ if ( status == VA_STATUS_SUCCESS ) { -+ if( fourcc == VA_FOURCC('Y','V','1','2') || -+ fourcc == VA_FOURCC('I','4','2','0') ) { -+ -+ lprintf("VAAPI YV12 image\n"); -+ -+ yv12_to_yv12( -+ /* Y */ -+ (uint8_t*)p_base, img->width, -+ img->base[0], img->pitches[0], -+ /* U */ -+ (uint8_t*)p_base + (img->width * img->height * 5/4), img->width/2, -+ img->base[1], img->pitches[1], -+ /* V */ -+ (uint8_t*)p_base + (img->width * img->height), img->width/2, -+ img->base[2], img->pitches[2], -+ /* width x height */ -+ img->width, img->height); -+ -+ } -+ if( fourcc == VA_FOURCC('N','V','1','2') ) { -+ -+ lprintf("VAAPI NV12 image\n"); -+ -+ nv12_to_yv12(this, (uint8_t*)p_base, va_context->va_image.data_size, img); -+ -+ } -+ -+ status = vaUnmapBuffer( va_context->va_display, va_context->va_image.buf ); -+ } -+ } -+ return; -+ } -+ - dy = img->base[0]; - du = img->base[1]; - 
dv = img->base[2]; -@@ -1038,6 +1548,11 @@ static void ff_handle_mpeg12_buffer (ff_ - - lprintf("handle_mpeg12_buffer\n"); - -+ if (buf->decoder_flags & BUF_FLAG_FRAME_START) { -+ lprintf("BUF_FLAG_FRAME_START\n"); -+ this->size = 0; -+ } -+ - while ((size > 0) || (flush == 1)) { - - uint8_t *current; -@@ -1403,47 +1918,48 @@ static void ff_handle_buffer (ff_video_d - - /* aspect ratio provided by ffmpeg, override previous setting */ - if ((this->aspect_ratio_prio < 2) && -- av_cmp_q(this->context->sample_aspect_ratio, avr00)) { -+ av_cmp_q(this->context->sample_aspect_ratio, avr00)) { - - if (!this->bih.biWidth || !this->bih.biHeight) { - this->bih.biWidth = this->context->width; - this->bih.biHeight = this->context->height; - } - -- this->aspect_ratio = av_q2d(this->context->sample_aspect_ratio) * -- (double)this->bih.biWidth / (double)this->bih.biHeight; -- this->aspect_ratio_prio = 2; -- lprintf("ffmpeg aspect ratio: %f\n", this->aspect_ratio); -- set_stream_info(this); -+ this->aspect_ratio = av_q2d(this->context->sample_aspect_ratio) * -+ (double)this->bih.biWidth / (double)this->bih.biHeight; -+ this->aspect_ratio_prio = 2; -+ lprintf("ffmpeg aspect ratio: %f\n", this->aspect_ratio); -+ set_stream_info(this); - } - - if (got_picture && this->av_frame->data[0]) { - /* got a picture, draw it */ - got_one_picture = 1; -+ - if(!this->av_frame->opaque) { -- /* indirect rendering */ -+ /* indirect rendering */ - -- /* initialize the colorspace converter */ -- if (!this->cs_convert_init) { -- if ((this->context->pix_fmt == PIX_FMT_RGB32) || -- (this->context->pix_fmt == PIX_FMT_RGB565) || -- (this->context->pix_fmt == PIX_FMT_RGB555) || -- (this->context->pix_fmt == PIX_FMT_BGR24) || -- (this->context->pix_fmt == PIX_FMT_RGB24) || -- (this->context->pix_fmt == PIX_FMT_PAL8)) { -- this->output_format = XINE_IMGFMT_YUY2; -- init_yuv_planes(&this->yuv, (this->bih.biWidth + 15) & ~15, this->bih.biHeight); -- this->yuv_init = 1; -- } -- this->cs_convert_init = 1; -- } -- -- if (this->aspect_ratio_prio == 0) { -- this->aspect_ratio = (double)this->bih.biWidth / (double)this->bih.biHeight; -- this->aspect_ratio_prio = 1; -- lprintf("default aspect ratio: %f\n", this->aspect_ratio); -- set_stream_info(this); -- } -+ /* initialize the colorspace converter */ -+ if (!this->cs_convert_init) { -+ if ((this->context->pix_fmt == PIX_FMT_RGB32) || -+ (this->context->pix_fmt == PIX_FMT_RGB565) || -+ (this->context->pix_fmt == PIX_FMT_RGB555) || -+ (this->context->pix_fmt == PIX_FMT_BGR24) || -+ (this->context->pix_fmt == PIX_FMT_RGB24) || -+ (this->context->pix_fmt == PIX_FMT_PAL8)) { -+ this->output_format = XINE_IMGFMT_YUY2; -+ init_yuv_planes(&this->yuv, (this->bih.biWidth + 15) & ~15, this->bih.biHeight); -+ this->yuv_init = 1; -+ } -+ this->cs_convert_init = 1; -+ } -+ -+ if (this->aspect_ratio_prio == 0) { -+ this->aspect_ratio = (double)this->bih.biWidth / (double)this->bih.biHeight; -+ this->aspect_ratio_prio = 1; -+ lprintf("default aspect ratio: %f\n", this->aspect_ratio); -+ set_stream_info(this); -+ } - - /* xine-lib expects the framesize to be a multiple of 16x16 (macroblock) */ - img = this->stream->video_out->get_frame (this->stream->video_out, -@@ -1484,7 +2000,7 @@ static void ff_handle_buffer (ff_video_d - this->av_frame->pict_type); - - } else if (!this->av_frame->opaque) { -- /* colorspace conversion or copy */ -+ /* colorspace conversion or copy */ - ff_convert_frame(this, img); - } - -@@ -1572,6 +2088,15 @@ static void ff_decode_data (video_decode - _x_stream_info_set(this->stream, 
XINE_STREAM_INFO_FRAME_DURATION, (this->reported_video_step = this->video_step)); - } - -+ int codec_type = buf->type & 0xFFFF0000; -+ if (codec_type == BUF_VIDEO_MPEG) { -+ this->is_mpeg12 = 1; -+ if ( this->mpeg_parser == NULL ) { -+ this->mpeg_parser = calloc(1, sizeof(mpeg_parser_t)); -+ mpeg_parser_init(this->mpeg_parser); -+ } -+ } -+ - if (buf->decoder_flags & BUF_FLAG_PREVIEW) { - - ff_handle_preview_buffer(this, buf); -@@ -1589,24 +2114,24 @@ static void ff_decode_data (video_decode - ff_handle_header_buffer(this, buf); - - if (buf->decoder_flags & BUF_FLAG_ASPECT) { -- if (this->aspect_ratio_prio < 3) { -- this->aspect_ratio = (double)buf->decoder_info[1] / (double)buf->decoder_info[2]; -- this->aspect_ratio_prio = 3; -- lprintf("aspect ratio: %f\n", this->aspect_ratio); -- set_stream_info(this); -- } -+ if (this->aspect_ratio_prio < 3) { -+ this->aspect_ratio = (double)buf->decoder_info[1] / (double)buf->decoder_info[2]; -+ this->aspect_ratio_prio = 3; -+ lprintf("aspect ratio: %f\n", this->aspect_ratio); -+ set_stream_info(this); -+ } - } - - } else { - - /* decode */ - if (buf->pts) -- this->pts = buf->pts; -+ this->pts = buf->pts; - - if (this->is_mpeg12) { -- ff_handle_mpeg12_buffer(this, buf); -+ ff_handle_mpeg12_buffer(this, buf); - } else { -- ff_handle_buffer(this, buf); -+ ff_handle_buffer(this, buf); - } - - } -@@ -1685,6 +2210,8 @@ static void ff_dispose (video_decoder_t - - lprintf ("ff_dispose\n"); - -+ close_vaapi(va_context); -+ - if (this->decoder_ok) { - xine_list_iterator_t it; - AVFrame *av_frame; -@@ -1776,10 +2303,31 @@ static video_decoder_t *ff_video_open_pl - - this->dr1_frames = xine_list_new(); - -+ va_context = calloc(1, sizeof(ff_vaapi_context_t)); -+ va_context->vaapi_context = calloc(1, sizeof(struct vaapi_context)); -+ - #ifdef LOG - this->debug_fmt = -1; - #endif - -+ this->class->enable_vaapi = this->class->xine->config->register_bool(this->class->xine->config, "video.processing.ffmpeg_enable_vaapi", 1, -+ _("Enable usage of VAAPI"), -+ _("Let you enable the usage of VAAPI.\n"), -+ 10, enable_vaapi, this->class); -+ -+#ifdef INIT_ERROR_HACK -+ if ((init_vaapi(va_context, VAProfileH264High, 1280, 720) == VA_STATUS_SUCCESS )) { -+ close_vaapi(va_context); -+ } else { -+ this->class->enable_vaapi = 0; -+ xprintf(this->class->xine, XINE_VERBOSITY_LOG, _("ffmpeg_video_dec: VAAPI Enabled in config, but output video out driver does not support vaapi.\n")); -+ } -+ free(va_context->vaapi_context); -+ free(va_context); -+ va_context = calloc(1, sizeof(ff_vaapi_context_t)); -+ va_context->vaapi_context = calloc(1, sizeof(struct vaapi_context)); -+#endif -+ - return &this->video_decoder; - } - -diff -Naur xine-lib-1.1.19.orig/src/combined/ffmpeg/Makefile.am xine-lib-1.1.19/src/combined/ffmpeg/Makefile.am ---- xine-lib-1.1.19.orig/src/combined/ffmpeg/Makefile.am 2010-03-09 23:17:05.000000000 +0100 -+++ xine-lib-1.1.19/src/combined/ffmpeg/Makefile.am 2010-10-14 14:49:01.000000000 +0200 -@@ -43,7 +43,7 @@ - - xineplug_decode_ff_la_CFLAGS = $(VISIBILITY_FLAG) $(AM_CFLAGS) - xineplug_decode_ff_la_LDFLAGS = $(xineplug_ldflags) $(IMPURE_TEXT_LDFLAGS) --xineplug_decode_ff_la_LIBADD = $(XINE_LIB) $(MLIB_LIBS) -lm $(ZLIB_LIBS) \ -+xineplug_decode_ff_la_LIBADD = $(XINE_LIB) $(MLIB_LIBS) -lm -lva-x11 -lva -lX11 $(ZLIB_LIBS) \ - $(link_ffmpeg) $(PTHREAD_LIBS) $(LTLIBINTL) - - xineplug_decode_dvaudio_la_CFLAGS = $(VISIBILITY_FLAG) $(AM_CFLAGS) |
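
The file removed above carried a local patch that bolted experimental VA-API hardware decoding onto the ffmpeg video decoder plugin of xine-lib 1.1.19: it registered get_buffer()/get_format() hooks that hand libavcodec VA surfaces, read decoded frames back with vaGetImage()/vaMapBuffer(), and — hence the "initerrorhack" in the file name — probed VA-API once at plugin open, because an init_vaapi() failure inside get_format() did not fall back to software rendering. The two sketches below illustrate those techniques under stated assumptions; they are not excerpts of the deleted code, and vaapi_probe() and nv12_repack() are hypothetical names chosen for the examples.

#include <stdio.h>
#include <X11/Xlib.h>
#include <va/va_x11.h>

/* Reduced sketch of the plugin-open probe the removed patch performed: check
 * whether VA-API comes up at all and fall back to software decoding otherwise.
 * The patch ran its full init_vaapi() (config, surfaces, context, image);
 * probing only XOpenDisplay()/vaInitialize() is a simplification made for
 * this example. */
static int vaapi_probe(void)
{
  int major, minor;
  Display  *x11 = XOpenDisplay(NULL);
  VADisplay va;

  if (!x11)
    return 0;                               /* no X connection, no VA-API */

  va = vaGetDisplay(x11);
  if (!va || vaInitialize(va, &major, &minor) != VA_STATUS_SUCCESS) {
    XCloseDisplay(x11);
    return 0;                               /* driver missing or broken */
  }

  printf("libva %d.%d available\n", major, minor);
  vaTerminate(va);                          /* release the VA display */
  XCloseDisplay(x11);
  return 1;                                 /* caller may enable hw decoding */
}

A probe like this needs exactly the libraries the Makefile.am hunk adds at link time (-lva-x11 -lva -lX11).

Frames decoded into VA surfaces were read back and converted to xine's planar YV12 layout; a minimal version of the NV12 repack done by the removed nv12_to_yv12() helper, assuming tightly packed planes (pitch == width), looks like this:

#include <stdint.h>
#include <string.h>

/* Copy the luma plane and de-interleave the UVUV chroma plane of an NV12
 * image into separate U and V planes.  Ignoring the destination pitches is
 * an assumption of this sketch, mirroring what the deleted helper did. */
static void nv12_repack(const uint8_t *src, int width, int height,
                        uint8_t *dst_y, uint8_t *dst_u, uint8_t *dst_v)
{
  const size_t y_size   = (size_t)width * height;
  const size_t uv_size  = y_size / 4;             /* 4:2:0 chroma plane size */
  const uint8_t *src_uv = src + y_size;           /* interleaved U,V pairs */
  size_t i;

  memcpy(dst_y, src, y_size);                     /* luma is already planar */

  for (i = 0; i < uv_size; i++) {
    dst_u[i] = src_uv[2 * i];                     /* even bytes -> U plane */
    dst_v[i] = src_uv[2 * i + 1];                 /* odd bytes  -> V plane */
  }
}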