Discussion:
[PATCH V2 1/3] lavc/qsvdec: set complete_frame flags for progressive picture
Zhong Li
2018-03-22 14:41:51 UTC
Permalink
Set the MFX_BITSTREAM_COMPLETE_FRAME flag when the picture is progressive.
This fixes VC-1 decoding segmentation faults that occur because the start
code cannot be set correctly otherwise.
See: ./avconv -hwaccel qsv -c:v vc1_qsv -i /fate-suite/vc1/SA00040.vc1
-vf "hwdownload, format=nv12" -f rawvideo /dev/null

v2: fix a regression with some H.264 interlaced clips
a. The field_order of some H.264 interlaced videos (e.g. cama3_vtc_b.avc) is marked as AV_FIELD_UNKNOWN
in h264_parser.c, so these are not complete frames.
Therefore set MFX_BITSTREAM_COMPLETE_FRAME only when the picture is progressive.
b. Some clips contain both progressive and interlaced frames (e.g. CAPAMA3_Sand_F.264),
so the parsed field_order may change during decoding.

This patch has been verified with other codecs (MPEG-2/HEVC/VP8).

Signed-off-by: Zhong Li <***@intel.com>
---
libavcodec/qsvdec.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/libavcodec/qsvdec.c b/libavcodec/qsvdec.c
index c74ec68..8148beb 100644
--- a/libavcodec/qsvdec.c
+++ b/libavcodec/qsvdec.c
@@ -321,6 +321,8 @@ static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
bs.DataLength = avpkt->size;
bs.MaxLength = bs.DataLength;
bs.TimeStamp = avpkt->pts;
+ if (avctx->field_order == AV_FIELD_PROGRESSIVE)
+ bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
}

sync = av_mallocz(sizeof(*sync));
@@ -509,6 +511,7 @@ int ff_qsv_process_data(AVCodecContext *avctx, QSVContext *q,
pkt->data, pkt->size, pkt->pts, pkt->dts,
pkt->pos);

+ avctx->field_order = q->parser->field_order;
/* TODO: flush delayed frames on reinit */
if (q->parser->format != q->orig_pix_fmt ||
q->parser->coded_width != avctx->coded_width ||
@@ -533,7 +536,6 @@ int ff_qsv_process_data(AVCodecContext *avctx, QSVContext *q,
avctx->height = q->parser->height;
avctx->coded_width = q->parser->coded_width;
avctx->coded_height = q->parser->coded_height;
- avctx->field_order = q->parser->field_order;
avctx->level = q->avctx_internal->level;
avctx->profile = q->avctx_internal->profile;
--
1.8.3.1
Zhong Li
2018-03-22 14:41:53 UTC
Permalink
Currently pict_type is unset.
Add an extra parameter to fetch the picture type from the QSV decoder.

v2: fix a compile error since AV_PICTURE_TYPE_NONE does not exist in Libav.
v3: remove the key_frame setting because the assumption "a key frame is equal
to an IDR frame" only holds for H.264.
For HEVC, all IRAP frames are key frames, and other codecs have no IDR
frames.

Signed-off-by: ChaoX A Liu <***@intel.com>
Signed-off-by: Zhong Li <***@intel.com>
---
libavcodec/qsv.c | 24 ++++++++++++++++++++++++
libavcodec/qsv_internal.h | 3 +++
libavcodec/qsvdec.c | 6 ++++++
3 files changed, 33 insertions(+)

diff --git a/libavcodec/qsv.c b/libavcodec/qsv.c
index 96dca14..c7ba642 100644
--- a/libavcodec/qsv.c
+++ b/libavcodec/qsv.c
@@ -195,6 +195,30 @@ int ff_qsv_find_surface_idx(QSVFramesContext *ctx, QSVFrame *frame)
return AVERROR_BUG;
}

+enum AVPictureType ff_qsv_map_pictype(int mfx_pic_type)
+{
+ enum AVPictureType type = AV_PICTURE_TYPE_NONE;
+ switch (mfx_pic_type & 0x7) {
+ case MFX_FRAMETYPE_I:
+ if (mfx_pic_type & MFX_FRAMETYPE_S)
+ type = AV_PICTURE_TYPE_SI;
+ else
+ type = AV_PICTURE_TYPE_I;
+ break;
+ case MFX_FRAMETYPE_B:
+ type = AV_PICTURE_TYPE_B;
+ break;
+ case MFX_FRAMETYPE_P:
+ if (mfx_pic_type & MFX_FRAMETYPE_S)
+ type = AV_PICTURE_TYPE_SP;
+ else
+ type = AV_PICTURE_TYPE_P;
+ break;
+ }
+
+ return type;
+}
+
static int qsv_load_plugins(mfxSession session, const char *load_plugins,
void *logctx)
{
diff --git a/libavcodec/qsv_internal.h b/libavcodec/qsv_internal.h
index 975c8de..07ddc59 100644
--- a/libavcodec/qsv_internal.h
+++ b/libavcodec/qsv_internal.h
@@ -48,6 +48,8 @@ typedef struct QSVMid {
typedef struct QSVFrame {
AVFrame *frame;
mfxFrameSurface1 surface;
+ mfxExtDecodedFrameInfo dec_info;
+ mfxExtBuffer *ext_param;

int queued;
int used;
@@ -83,6 +85,7 @@ int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id);
int ff_qsv_profile_to_mfx(enum AVCodecID codec_id, int profile);

int ff_qsv_map_pixfmt(enum AVPixelFormat format, uint32_t *fourcc);
+enum AVPictureType ff_qsv_map_pictype(int mfx_pic_type);

int ff_qsv_init_internal_session(AVCodecContext *avctx, mfxSession *session,
const char *load_plugins);
diff --git a/libavcodec/qsvdec.c b/libavcodec/qsvdec.c
index 8148beb..c0ed4bd 100644
--- a/libavcodec/qsvdec.c
+++ b/libavcodec/qsvdec.c
@@ -235,6 +235,11 @@ static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)

frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
}
+ frame->surface.Data.ExtParam = &frame->ext_param;
+ frame->surface.Data.NumExtParam = 1;
+ frame->ext_param = (mfxExtBuffer*)&frame->dec_info;
+ frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
+ frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);

frame->used = 1;

@@ -421,6 +426,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
frame->interlaced_frame =
!(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
+ frame->pict_type = ff_qsv_map_pictype(out_frame->dec_info.FrameType);

/* update the surface properties */
if (avctx->pix_fmt == AV_PIX_FMT_QSV)
--
1.8.3.1
Maxym Dmytrychenko
2018-03-26 19:02:40 UTC
Permalink
Aside from the indentation points at frame->surface..., this should be OK.
Post by Zhong Li
Currently pict_type are unset.
Add an extra param to fetch the picture type from qsv decoder
v2: fix the compile error since AV_PICTURE_TYPE_NONE is not existed in libav.
v3: remove the key_frame setting because the judgement “key frame is equal
to IDR frame” only suitable for H264.
For HEVC, all IRAP frames are key frames, and other codecs have no IDR
frame.
---
libavcodec/qsv.c | 24 ++++++++++++++++++++++++
libavcodec/qsv_internal.h | 3 +++
libavcodec/qsvdec.c | 6 ++++++
3 files changed, 33 insertions(+)
diff --git a/libavcodec/qsv.c b/libavcodec/qsv.c
index 96dca14..c7ba642 100644
--- a/libavcodec/qsv.c
+++ b/libavcodec/qsv.c
@@ -195,6 +195,30 @@ int ff_qsv_find_surface_idx(QSVFramesContext *ctx, QSVFrame *frame)
return AVERROR_BUG;
}
+enum AVPictureType ff_qsv_map_pictype(int mfx_pic_type)
+{
+ enum AVPictureType type = AV_PICTURE_TYPE_NONE;
+ switch (mfx_pic_type & 0x7) {
+ case MFX_FRAMETYPE_I:
+ if (mfx_pic_type & MFX_FRAMETYPE_S)
+ type = AV_PICTURE_TYPE_SI;
+ else
+ type = AV_PICTURE_TYPE_I;
+ break;
+ case MFX_FRAMETYPE_B:
+ type = AV_PICTURE_TYPE_B;
+ break;
+ case MFX_FRAMETYPE_P:
+ if (mfx_pic_type & MFX_FRAMETYPE_S)
+ type = AV_PICTURE_TYPE_SP;
+ else
+ type = AV_PICTURE_TYPE_P;
+ break;
+ }
+
+ return type;
+}
+
static int qsv_load_plugins(mfxSession session, const char *load_plugins,
void *logctx)
{
diff --git a/libavcodec/qsv_internal.h b/libavcodec/qsv_internal.h
index 975c8de..07ddc59 100644
--- a/libavcodec/qsv_internal.h
+++ b/libavcodec/qsv_internal.h
@@ -48,6 +48,8 @@ typedef struct QSVMid {
typedef struct QSVFrame {
AVFrame *frame;
mfxFrameSurface1 surface;
+ mfxExtDecodedFrameInfo dec_info;
+ mfxExtBuffer *ext_param;
int queued;
int used;
@@ -83,6 +85,7 @@ int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id);
int ff_qsv_profile_to_mfx(enum AVCodecID codec_id, int profile);
int ff_qsv_map_pixfmt(enum AVPixelFormat format, uint32_t *fourcc);
+enum AVPictureType ff_qsv_map_pictype(int mfx_pic_type);
int ff_qsv_init_internal_session(AVCodecContext *avctx, mfxSession *session,
const char *load_plugins);
diff --git a/libavcodec/qsvdec.c b/libavcodec/qsvdec.c
index 8148beb..c0ed4bd 100644
--- a/libavcodec/qsvdec.c
+++ b/libavcodec/qsvdec.c
@@ -235,6 +235,11 @@ static int alloc_frame(AVCodecContext *avctx,
QSVContext *q, QSVFrame *frame)
frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
}
+ frame->surface.Data.ExtParam = &frame->ext_param;
+ frame->surface.Data.NumExtParam = 1;
+ frame->ext_param = (mfxExtBuffer*)&frame->dec_info;
+ frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
+ frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);
frame->used = 1;
@@ -421,6 +426,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
frame->interlaced_frame =
!(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
+ frame->pict_type = ff_qsv_map_pictype(out_frame->
dec_info.FrameType);
/* update the surface properties */
if (avctx->pix_fmt == AV_PIX_FMT_QSV)
--
1.8.3.1
Li, Zhong
2018-03-27 09:06:10 UTC
Permalink
It is a necessary pipeline step: attach the created buffer to the surface, and then we can get the frame type from MSDK.
Refer: https://github.com/Intel-Media-SDK/MediaSDK/blob/master/_studio/mfx_lib/decode/h264/src/mfx_h264_dec_decode.cpp#L1411
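For clarity, here is a rough sketch of that flow (illustrative only: the example_frame struct and attach_dec_info() helper are invented for this sketch; the real wiring is alloc_frame() in the patch above):

#include <mfx/mfxvideo.h>

/* Illustrative only: mirrors what alloc_frame() does in the patch above.
 * The struct and helper names here are invented for the sketch. */
struct example_frame {
    mfxFrameSurface1       surface;
    mfxExtDecodedFrameInfo dec_info;
    mfxExtBuffer          *ext_param;
};

static void attach_dec_info(struct example_frame *f)
{
    f->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
    f->dec_info.Header.BufferSz = sizeof(f->dec_info);
    f->ext_param                = (mfxExtBuffer *)&f->dec_info;
    f->surface.Data.ExtParam    = &f->ext_param;   /* attach before decoding */
    f->surface.Data.NumExtParam = 1;
}

/* After MFXVideoDECODE_DecodeFrameAsync() plus the matching
 * MFXVideoCORE_SyncOperation(), MSDK has filled in dec_info, so the
 * type can be read back, e.g.:
 *     frame->pict_type = ff_qsv_map_pictype(f->dec_info.FrameType);
 */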
-----Original Message-----
Maxym Dmytrychenko
Sent: Tuesday, March 27, 2018 3:03 AM
Subject: Re: [libav-devel] [PATCH V3 3/3] lavc/qsvdec: expose frame pic_type
aside of intend points at frame->surface..., should be ok
Post by Zhong Li
Currently pict_type are unset.
Add an extra param to fetch the picture type from qsv decoder
v2: fix the compile error since AV_PICTURE_TYPE_NONE is not existed in libav.
v3: remove the key_frame setting because the judgement “key frame is
equal to IDR frame” only suitable for H264.
For HEVC, all IRAP frames are key frames, and other codecs have no IDR
frame.
---
libavcodec/qsv.c | 24 ++++++++++++++++++++++++
libavcodec/qsv_internal.h | 3 +++
libavcodec/qsvdec.c | 6 ++++++
3 files changed, 33 insertions(+)
diff --git a/libavcodec/qsv.c b/libavcodec/qsv.c index
96dca14..c7ba642 100644
--- a/libavcodec/qsv.c
+++ b/libavcodec/qsv.c
@@ -195,6 +195,30 @@ int ff_qsv_find_surface_idx(QSVFramesContext
*ctx, QSVFrame *frame)
return AVERROR_BUG;
}
+enum AVPictureType ff_qsv_map_pictype(int mfx_pic_type) {
+ enum AVPictureType type = AV_PICTURE_TYPE_NONE;
+ switch (mfx_pic_type & 0x7) {
+ case MFX_FRAMETYPE_I:
+ if (mfx_pic_type & MFX_FRAMETYPE_S)
+ type = AV_PICTURE_TYPE_SI;
+ else
+ type = AV_PICTURE_TYPE_I;
+ break;
+ case MFX_FRAMETYPE_B:
+ type = AV_PICTURE_TYPE_B;
+ break;
+ case MFX_FRAMETYPE_P:
+ if (mfx_pic_type & MFX_FRAMETYPE_S)
+ type = AV_PICTURE_TYPE_SP;
+ else
+ type = AV_PICTURE_TYPE_P;
+ break;
+ }
+
+ return type;
+}
+
static int qsv_load_plugins(mfxSession session, const char *load_plugins,
void *logctx) { diff --git
a/libavcodec/qsv_internal.h b/libavcodec/qsv_internal.h index
975c8de..07ddc59 100644
--- a/libavcodec/qsv_internal.h
+++ b/libavcodec/qsv_internal.h
@@ -48,6 +48,8 @@ typedef struct QSVMid { typedef struct QSVFrame {
AVFrame *frame;
mfxFrameSurface1 surface;
+ mfxExtDecodedFrameInfo dec_info;
+ mfxExtBuffer *ext_param;
int queued;
int used;
@@ -83,6 +85,7 @@ int ff_qsv_codec_id_to_mfx(enum AVCodecID
codec_id);
Post by Zhong Li
int ff_qsv_profile_to_mfx(enum AVCodecID codec_id, int profile);
int ff_qsv_map_pixfmt(enum AVPixelFormat format, uint32_t *fourcc);
+enum AVPictureType ff_qsv_map_pictype(int mfx_pic_type);
int ff_qsv_init_internal_session(AVCodecContext *avctx, mfxSession *session,
const char *load_plugins); diff
--git a/libavcodec/qsvdec.c b/libavcodec/qsvdec.c index
8148beb..c0ed4bd 100644
--- a/libavcodec/qsvdec.c
+++ b/libavcodec/qsvdec.c
@@ -235,6 +235,11 @@ static int alloc_frame(AVCodecContext *avctx,
QSVContext *q, QSVFrame *frame)
frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
}
+ frame->surface.Data.ExtParam = &frame->ext_param;
+ frame->surface.Data.NumExtParam = 1;
+ frame->ext_param = (mfxExtBuffer*)&frame->dec_info;
+ frame->dec_info.Header.BufferId =
MFX_EXTBUFF_DECODED_FRAME_INFO;
Post by Zhong Li
+ frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);
frame->used = 1;
@@ -421,6 +426,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
frame->interlaced_frame =
!(outsurf->Info.PicStruct &
MFX_PICSTRUCT_PROGRESSIVE);
Post by Zhong Li
+ frame->pict_type = ff_qsv_map_pictype(out_frame->
dec_info.FrameType);
/* update the surface properties */
if (avctx->pix_fmt == AV_PIX_FMT_QSV)
--
1.8.3.1
Zhong Li
2018-03-22 14:41:52 UTC
Permalink
RGB32 format may be used as overlay with alpha blending.
So add RGB32 format support.

Signed-off-by: ChaoX A Liu <***@intel.com>
Signed-off-by: Zhong Li <***@intel.com>
---
libavutil/hwcontext_qsv.c | 43 +++++++++++++++++++++++++++++++++----------
1 file changed, 33 insertions(+), 10 deletions(-)

diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c
index 5018a05..0db446b 100644
--- a/libavutil/hwcontext_qsv.c
+++ b/libavutil/hwcontext_qsv.c
@@ -90,6 +90,7 @@ static const struct {
uint32_t fourcc;
} supported_pixel_formats[] = {
{ AV_PIX_FMT_NV12, MFX_FOURCC_NV12 },
+ { AV_PIX_FMT_RGB32,MFX_FOURCC_RGB4 },
{ AV_PIX_FMT_P010, MFX_FOURCC_P010 },
{ AV_PIX_FMT_PAL8, MFX_FOURCC_P8 },
};
@@ -730,6 +731,36 @@ static int qsv_transfer_data_child(AVHWFramesContext *ctx, AVFrame *dst,
return ret;
}

+static int map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface)
+{
+ switch (frame->format) {
+ case AV_PIX_FMT_NV12:
+ surface->Data.Y = frame->data[0];
+ surface->Data.UV = frame->data[1];
+ break;
+
+ case AV_PIX_FMT_YUV420P:
+ surface->Data.Y = frame->data[0];
+ surface->Data.U = frame->data[1];
+ surface->Data.V = frame->data[2];
+ break;
+
+ case AV_PIX_FMT_RGB32:
+ surface->Data.B = frame->data[0];
+ surface->Data.G = frame->data[0] + 1;
+ surface->Data.R = frame->data[0] + 2;
+ surface->Data.A = frame->data[0] + 3;
+ break;
+
+ default:
+ return MFX_ERR_UNSUPPORTED;
+ }
+ surface->Data.Pitch = frame->linesize[0];
+ surface->Data.TimeStamp = frame->pts;
+
+ return 0;
+}
+
static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
const AVFrame *src)
{
@@ -749,11 +780,7 @@ static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
}

out.Info = in->Info;
- out.Data.PitchLow = dst->linesize[0];
- out.Data.Y = dst->data[0];
- out.Data.U = dst->data[1];
- out.Data.V = dst->data[2];
- out.Data.A = dst->data[3];
+ map_frame_to_surface(dst, &out);

do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session_download, in, &out, NULL, &sync);
@@ -796,11 +823,7 @@ static int qsv_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
}

in.Info = out->Info;
- in.Data.PitchLow = src->linesize[0];
- in.Data.Y = src->data[0];
- in.Data.U = src->data[1];
- in.Data.V = src->data[2];
- in.Data.A = src->data[3];
+ map_frame_to_surface(src, &in);

do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session_upload, &in, out, NULL, &sync);
--
1.8.3.1
Li, Zhong
2018-03-29 14:05:58 UTC
Permalink
Ping?
-----Original Message-----
From: Li, Zhong
Sent: Thursday, March 22, 2018 10:42 PM
Subject: [PATCH V2 2/3] lavu/hwcontext_qsv: Add support for pix_fmt
RGB32.
RGB32 format may be used as overlay with alpha blending.
So add RGB32 format support.
---
libavutil/hwcontext_qsv.c | 43
+++++++++++++++++++++++++++++++++----------
1 file changed, 33 insertions(+), 10 deletions(-)
diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c index
5018a05..0db446b 100644
--- a/libavutil/hwcontext_qsv.c
+++ b/libavutil/hwcontext_qsv.c
@@ -90,6 +90,7 @@ static const struct {
uint32_t fourcc;
} supported_pixel_formats[] = {
{ AV_PIX_FMT_NV12, MFX_FOURCC_NV12 },
+ { AV_PIX_FMT_RGB32,MFX_FOURCC_RGB4 },
{ AV_PIX_FMT_P010, MFX_FOURCC_P010 },
{ AV_PIX_FMT_PAL8, MFX_FOURCC_P8 },
};
@@ -730,6 +731,36 @@ static int
qsv_transfer_data_child(AVHWFramesContext *ctx, AVFrame *dst,
return ret;
}
+static int map_frame_to_surface(const AVFrame *frame,
mfxFrameSurface1
+*surface) {
+ switch (frame->format) {
+ case AV_PIX_FMT_NV12:
+ surface->Data.Y = frame->data[0];
+ surface->Data.UV = frame->data[1];
+ break;
+
+ case AV_PIX_FMT_YUV420P:
+ surface->Data.Y = frame->data[0];
+ surface->Data.U = frame->data[1];
+ surface->Data.V = frame->data[2];
+ break;
+
+ case AV_PIX_FMT_RGB32:
+ surface->Data.B = frame->data[0];
+ surface->Data.G = frame->data[0] + 1;
+ surface->Data.R = frame->data[0] + 2;
+ surface->Data.A = frame->data[0] + 3;
+ break;
+
+ default:
+ return MFX_ERR_UNSUPPORTED;
+ }
+ surface->Data.Pitch = frame->linesize[0];
+ surface->Data.TimeStamp = frame->pts;
+
+ return 0;
+}
+
static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
*ctx, AVFrame *dst,
}
out.Info = in->Info;
- out.Data.PitchLow = dst->linesize[0];
- out.Data.Y = dst->data[0];
- out.Data.U = dst->data[1];
- out.Data.V = dst->data[2];
- out.Data.A = dst->data[3];
+ map_frame_to_surface(dst, &out);
do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session_download, in,
qsv_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
}
in.Info = out->Info;
- in.Data.PitchLow = src->linesize[0];
- in.Data.Y = src->data[0];
- in.Data.U = src->data[1];
- in.Data.V = src->data[2];
- in.Data.A = src->data[3];
+ map_frame_to_surface(src, &in);
do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session_upload, &in, out, NULL, &sync);
--
1.8.3.1
Maxym Dmytrychenko
2018-03-29 18:44:45 UTC
Permalink
Should be OK,

will take care of it shortly.
Post by Zhong Li
Ping?
-----Original Message-----
From: Li, Zhong
Sent: Thursday, March 22, 2018 10:42 PM
Subject: [PATCH V2 2/3] lavu/hwcontext_qsv: Add support for pix_fmt
RGB32.
RGB32 format may be used as overlay with alpha blending.
So add RGB32 format support.
---
libavutil/hwcontext_qsv.c | 43
+++++++++++++++++++++++++++++++++----------
1 file changed, 33 insertions(+), 10 deletions(-)
diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c index
5018a05..0db446b 100644
--- a/libavutil/hwcontext_qsv.c
+++ b/libavutil/hwcontext_qsv.c
@@ -90,6 +90,7 @@ static const struct {
uint32_t fourcc;
} supported_pixel_formats[] = {
{ AV_PIX_FMT_NV12, MFX_FOURCC_NV12 },
+ { AV_PIX_FMT_RGB32,MFX_FOURCC_RGB4 },
{ AV_PIX_FMT_P010, MFX_FOURCC_P010 },
{ AV_PIX_FMT_PAL8, MFX_FOURCC_P8 },
};
@@ -730,6 +731,36 @@ static int
qsv_transfer_data_child(AVHWFramesContext *ctx, AVFrame *dst,
return ret;
}
+static int map_frame_to_surface(const AVFrame *frame,
mfxFrameSurface1
+*surface) {
+ switch (frame->format) {
+ case AV_PIX_FMT_NV12:
+ surface->Data.Y = frame->data[0];
+ surface->Data.UV = frame->data[1];
+ break;
+
+ case AV_PIX_FMT_YUV420P:
+ surface->Data.Y = frame->data[0];
+ surface->Data.U = frame->data[1];
+ surface->Data.V = frame->data[2];
+ break;
+
+ case AV_PIX_FMT_RGB32:
+ surface->Data.B = frame->data[0];
+ surface->Data.G = frame->data[0] + 1;
+ surface->Data.R = frame->data[0] + 2;
+ surface->Data.A = frame->data[0] + 3;
+ break;
+
+ default:
+ return MFX_ERR_UNSUPPORTED;
+ }
+ surface->Data.Pitch = frame->linesize[0];
+ surface->Data.TimeStamp = frame->pts;
+
+ return 0;
+}
+
static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
*ctx, AVFrame *dst,
}
out.Info = in->Info;
- out.Data.PitchLow = dst->linesize[0];
- out.Data.Y = dst->data[0];
- out.Data.U = dst->data[1];
- out.Data.V = dst->data[2];
- out.Data.A = dst->data[3];
+ map_frame_to_surface(dst, &out);
do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session_download, in,
qsv_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
}
in.Info = out->Info;
- in.Data.PitchLow = src->linesize[0];
- in.Data.Y = src->data[0];
- in.Data.U = src->data[1];
- in.Data.V = src->data[2];
- in.Data.A = src->data[3];
+ map_frame_to_surface(src, &in);
do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session_upload, &in, out, NULL, &sync);
--
1.8.3.1
Mark Thompson
2018-03-29 19:20:02 UTC
Permalink
Post by Zhong Li
RGB32 format may be used as overlay with alpha blending.
So add RGB32 format support.
---
libavutil/hwcontext_qsv.c | 43 +++++++++++++++++++++++++++++++++----------
1 file changed, 33 insertions(+), 10 deletions(-)
Please write BGRA rather than RGB32. I doubt this will ever run on a big-endian machine, but it would be clearer.
Post by Zhong Li
diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c
index 5018a05..0db446b 100644
--- a/libavutil/hwcontext_qsv.c
+++ b/libavutil/hwcontext_qsv.c
@@ -90,6 +90,7 @@ static const struct {
uint32_t fourcc;
} supported_pixel_formats[] = {
{ AV_PIX_FMT_NV12, MFX_FOURCC_NV12 },
+ { AV_PIX_FMT_RGB32,MFX_FOURCC_RGB4 },
{ AV_PIX_FMT_P010, MFX_FOURCC_P010 },
{ AV_PIX_FMT_PAL8, MFX_FOURCC_P8 },
};
@@ -730,6 +731,36 @@ static int qsv_transfer_data_child(AVHWFramesContext *ctx, AVFrame *dst,
return ret;
}
+static int map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface)
+{
+ switch (frame->format) {
Indentation - case labels should be at the same level as switch.
Post by Zhong Li
+ case AV_PIX_FMT_NV12:
+ surface->Data.Y = frame->data[0];
+ surface->Data.UV = frame->data[1];
+ break;
+
+ case AV_PIX_FMT_YUV420P:
+ surface->Data.Y = frame->data[0];
+ surface->Data.U = frame->data[1];
+ surface->Data.V = frame->data[2];
+ break;
+
+ case AV_PIX_FMT_RGB32:
+ surface->Data.B = frame->data[0];
+ surface->Data.G = frame->data[0] + 1;
+ surface->Data.R = frame->data[0] + 2;
+ surface->Data.A = frame->data[0] + 3;
+ break;
+
+ default:
+ return MFX_ERR_UNSUPPORTED;
+ }
+ surface->Data.Pitch = frame->linesize[0];
What happens if linesize[0] != linesize[1]? (You aren't introducing that problem, but I hadn't seen it before.)
Post by Zhong Li
+ surface->Data.TimeStamp = frame->pts;
+
+ return 0;
+}
+
static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
const AVFrame *src)
{
@@ -749,11 +780,7 @@ static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
}
out.Info = in->Info;
- out.Data.PitchLow = dst->linesize[0];
- out.Data.Y = dst->data[0];
- out.Data.U = dst->data[1];
- out.Data.V = dst->data[2];
- out.Data.A = dst->data[3];
+ map_frame_to_surface(dst, &out);
do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session_download, in, &out, NULL, &sync);
@@ -796,11 +823,7 @@ static int qsv_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
}
in.Info = out->Info;
- in.Data.PitchLow = src->linesize[0];
- in.Data.Y = src->data[0];
- in.Data.U = src->data[1];
- in.Data.V = src->data[2];
- in.Data.A = src->data[3];
+ map_frame_to_surface(src, &in);
do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session_upload, &in, out, NULL, &sync);
This has slightly changed what gets passed for YUV420P and NV12 - it no longer sets the unused pointers to NULL. Presumably that's always ok, even with old versions?

Thanks,

- Mark
Li, Zhong
2018-04-02 09:32:24 UTC
Permalink
Post by Mark Thompson
Please write BGRA rather than RGB32. I doubt this will ever run on a
big-endian machine, but it would be clearer.
Agree, will update.
Post by Mark Thompson
Post by Zhong Li
diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c
index 5018a05..0db446b 100644
--- a/libavutil/hwcontext_qsv.c
+++ b/libavutil/hwcontext_qsv.c
@@ -90,6 +90,7 @@ static const struct {
uint32_t fourcc;
} supported_pixel_formats[] = {
{ AV_PIX_FMT_NV12, MFX_FOURCC_NV12 },
+ { AV_PIX_FMT_RGB32,MFX_FOURCC_RGB4 },
{ AV_PIX_FMT_P010, MFX_FOURCC_P010 },
{ AV_PIX_FMT_PAL8, MFX_FOURCC_P8 },
};
@@ -730,6 +731,36 @@ static int
qsv_transfer_data_child(AVHWFramesContext *ctx, AVFrame *dst,
Post by Zhong Li
return ret;
}
+static int map_frame_to_surface(const AVFrame *frame,
+mfxFrameSurface1 *surface) {
+ switch (frame->format) {
Indentation - case labels should be at the same level as switch.
Sure, thanks for the reminder. Will update.
Post by Mark Thompson
Post by Zhong Li
+ case AV_PIX_FMT_NV12:
+ surface->Data.Y = frame->data[0];
+ surface->Data.UV = frame->data[1];
+ break;
+
+ case AV_PIX_FMT_YUV420P:
+ surface->Data.Y = frame->data[0];
+ surface->Data.U = frame->data[1];
+ surface->Data.V = frame->data[2];
+ break;
+
+ case AV_PIX_FMT_RGB32:
+ surface->Data.B = frame->data[0];
+ surface->Data.G = frame->data[0] + 1;
+ surface->Data.R = frame->data[0] + 2;
+ surface->Data.A = frame->data[0] + 3;
+ break;
+
+ default:
+ return MFX_ERR_UNSUPPORTED;
+ }
+ surface->Data.Pitch = frame->linesize[0];
What happens if linesize[0] != linesize[1]? (You aren't introducing that
problem, but I hadn't seen it before.)
I don't think MSDK can handle this case perfectly, since there is only one pitch.
Taking YUV420P as an example, IMHO the linesize of Y is required to be twice that of U and V.
Post by Mark Thompson
Post by Zhong Li
+ surface->Data.TimeStamp = frame->pts;
+
+ return 0;
+}
+
static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame
*dst,
-749,11
AVFrame *dst,
Post by Zhong Li
}
out.Info = in->Info;
- out.Data.PitchLow = dst->linesize[0];
- out.Data.Y = dst->data[0];
- out.Data.U = dst->data[1];
- out.Data.V = dst->data[2];
- out.Data.A = dst->data[3];
+ map_frame_to_surface(dst, &out);
do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session_download,
in,
qsv_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
Post by Zhong Li
}
in.Info = out->Info;
- in.Data.PitchLow = src->linesize[0];
- in.Data.Y = src->data[0];
- in.Data.U = src->data[1];
- in.Data.V = src->data[2];
- in.Data.A = src->data[3];
+ map_frame_to_surface(src, &in);
do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session_upload,
&in,
Post by Zhong Li
out, NULL, &sync);
This has slightly changed what gets passed for YUV420P and NV12 - it no
longer sets the unused pointers to NULL. Presumably that's always ok,
even with old versions?
Yes, it is. But as you can see, there were other unused pointers that weren't set to NULL before this patch either.
Since they are unused, IMHO it is not necessary to zero them.
Post by Mark Thompson
Thanks,
- Mark
Mark Thompson
2018-04-02 12:45:21 UTC
Permalink
Post by Li, Zhong
Post by Mark Thompson
Post by Zhong Li
diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c
index 5018a05..0db446b 100644
--- a/libavutil/hwcontext_qsv.c
+++ b/libavutil/hwcontext_qsv.c
...
+ case AV_PIX_FMT_NV12:
+ surface->Data.Y = frame->data[0];
+ surface->Data.UV = frame->data[1];
+ break;
+
+ case AV_PIX_FMT_YUV420P:
+ surface->Data.Y = frame->data[0];
+ surface->Data.U = frame->data[1];
+ surface->Data.V = frame->data[2];
+ break;
+
+ case AV_PIX_FMT_RGB32:
+ surface->Data.B = frame->data[0];
+ surface->Data.G = frame->data[0] + 1;
+ surface->Data.R = frame->data[0] + 2;
+ surface->Data.A = frame->data[0] + 3;
+ break;
+
+ default:
+ return MFX_ERR_UNSUPPORTED;
+ }
+ surface->Data.Pitch = frame->linesize[0];
What happens if linesize[0] != linesize[1]? (You aren't introducing that
problem, but I hadn't seen it before.)
I don't think MSDK can handle this case perfectly since there is only one pitch.
Take YUV420p as example, IMHO it is required linesize of Y must be twice of U and V.
That isn't going to be true for a general frame in libav - the pitches for each plane are independent. Since they are usually created by taking the width of the plane and rounding up to the appropriate alignment (usually 32, I think) it will work for widths which are multiples of large powers of two - e.g. 1920 width will work because both 1920 and 960 are already aligned to a 32-byte boundary. It won't work for less aligned widths (e.g. 720 width from NTSC or PAL will give luma pitch = align(720, 32) = 736 but chroma pitch = align(360, 32) = 384), nor will it work for other ways of laying out the frame such as line-interleaving.
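To make that arithmetic concrete, here is a small standalone sketch (the 32-byte figure is only the typical default mentioned above, and align_up() is just a helper invented for this example):

#include <stdio.h>

/* round x up to the next multiple of a (a must be a power of two) */
static int align_up(int x, int a)
{
    return (x + a - 1) & ~(a - 1);
}

int main(void)
{
    /* 1920-wide YUV420P: both planes already land on the alignment,
     * so the chroma pitch is exactly half the luma pitch. */
    printf("1920: luma %d, chroma %d\n", align_up(1920, 32), align_up(960, 32));

    /* 720-wide (NTSC/PAL): the planes round up independently, so the
     * 2:1 relation a single MSDK Pitch value needs is lost. */
    printf(" 720: luma %d, chroma %d\n", align_up(720, 32), align_up(360, 32));

    return 0;
}
/* prints 1920/960 for the first case and 736/384 for the second */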

This problem was preexisting, though, so I guess it isn't necessary to deal with it in this patch. Not sure what the right answer is - maybe it could just reject non-matching pitches and return an error? Or it could make a temporary frame with the stricter alignment and copy to that before uploading? (Though that might be slow and defeat the point of this upload path.)

- Mark
Maxym Dmytrychenko
2018-04-02 13:06:57 UTC
Permalink
How about the second option: a temporary frame with the stricter alignment,
copied into before uploading,
but with a log/INFO message included?
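Something along these lines, perhaps (untested sketch, assuming the context of hwcontext_qsv.c where AVFrame, av_frame_get_buffer(), av_frame_copy() and av_log() are available; realign_frame() is a hypothetical helper and the alignment value and log text are placeholders):

/* Untested sketch of the "temporary aligned frame" idea: returns an
 * aligned copy the caller can pass to map_frame_to_surface(), and which
 * must be kept alive until the VPP call has completed. */
static AVFrame *realign_frame(void *logctx, const AVFrame *src, int align)
{
    AVFrame *tmp = av_frame_alloc();

    if (!tmp)
        return NULL;

    tmp->format = src->format;
    tmp->width  = src->width;
    tmp->height = src->height;

    if (av_frame_get_buffer(tmp, align) < 0 ||
        av_frame_copy(tmp, src) < 0) {
        av_frame_free(&tmp);
        return NULL;
    }

    av_log(logctx, AV_LOG_INFO,
           "Frame pitches not directly usable, uploading through an aligned copy.\n");

    return tmp;
}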
Post by Li, Zhong
Post by Li, Zhong
Post by Mark Thompson
Post by Zhong Li
diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c
index 5018a05..0db446b 100644
--- a/libavutil/hwcontext_qsv.c
+++ b/libavutil/hwcontext_qsv.c
...
+ case AV_PIX_FMT_NV12:
+ surface->Data.Y = frame->data[0];
+ surface->Data.UV = frame->data[1];
+ break;
+
+ case AV_PIX_FMT_YUV420P:
+ surface->Data.Y = frame->data[0];
+ surface->Data.U = frame->data[1];
+ surface->Data.V = frame->data[2];
+ break;
+
+ case AV_PIX_FMT_RGB32:
+ surface->Data.B = frame->data[0];
+ surface->Data.G = frame->data[0] + 1;
+ surface->Data.R = frame->data[0] + 2;
+ surface->Data.A = frame->data[0] + 3;
+ break;
+
+ default:
+ return MFX_ERR_UNSUPPORTED;
+ }
+ surface->Data.Pitch = frame->linesize[0];
What happens if linesize[0] != linesize[1]? (You aren't introducing
that
Post by Li, Zhong
Post by Mark Thompson
problem, but I hadn't seen it before.)
I don't think MSDK can handle this case perfectly since there is only
one pitch.
Post by Li, Zhong
Take YUV420p as example, IMHO it is required linesize of Y must be twice
of U and V.
That isn't going to be true for a general frame in libav - the pitches for
each plane are independent. Since they are usually created by taking the
width of the plane and rounding up to the appropriate alignment (usually
32, I think) it will work for widths which are multiples of large powers of
two - e.g. 1920 width will work because both 1920 and 960 are already
aligned to a 32-byte boundary. It won't work for less aligned widths (e.g.
720 width from NTSC or PAL will give luma pitch = align(720, 32) = 736 but
chroma pitch = align(360, 32) = 384), nor will it work for other ways of
laying out the frame such as line-interleaving.
This problem was preexisting, though, so I guess it isn't necessary to
deal with it in this patch. Not sure what the right answer is - maybe it
could just reject non-matching pitches and return an error? Or it could
make a temporary frame with the stricter alignment and copy to that before
uploading? (Though that might be slow and defeat the point of this upload
path.)
- Mark
Li, Zhong
2018-04-03 07:51:12 UTC
Permalink
Thompson
Sent: Monday, April 2, 2018 8:45 PM
Subject: Re: [libav-devel] [PATCH V2 2/3] lavu/hwcontext_qsv: Add support
for pix_fmt RGB32.
Post by Li, Zhong
Post by Mark Thompson
Post by Zhong Li
diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c
index 5018a05..0db446b 100644
--- a/libavutil/hwcontext_qsv.c
+++ b/libavutil/hwcontext_qsv.c
...
+ case AV_PIX_FMT_NV12:
+ surface->Data.Y = frame->data[0];
+ surface->Data.UV = frame->data[1];
+ break;
+
+ case AV_PIX_FMT_YUV420P:
+ surface->Data.Y = frame->data[0];
+ surface->Data.U = frame->data[1];
+ surface->Data.V = frame->data[2];
+ break;
+
+ case AV_PIX_FMT_RGB32:
+ surface->Data.B = frame->data[0];
+ surface->Data.G = frame->data[0] + 1;
+ surface->Data.R = frame->data[0] + 2;
+ surface->Data.A = frame->data[0] + 3;
+ break;
+
+ default:
+ return MFX_ERR_UNSUPPORTED;
+ }
+ surface->Data.Pitch = frame->linesize[0];
What happens if linesize[0] != linesize[1]? (You aren't introducing
that problem, but I hadn't seen it before.)
I don't think MSDK can handle this case perfectly since there is only one
pitch.
Post by Li, Zhong
Take YUV420p as example, IMHO it is required linesize of Y must be twice
of U and V.
That isn't going to be true for a general frame in libav - the pitches for each
plane are independent. Since they are usually created by taking the width
of the plane and rounding up to the appropriate alignment (usually 32, I think)
it will work for widths which are multiples of large powers of two - e.g. 1920
width will work because both 1920 and 960 are already aligned to a 32-byte
boundary. It won't work for less aligned widths (e.g. 720 width from NTSC
or PAL will give luma pitch = align(720, 32) = 736 but chroma pitch = align(360,
32) = 384), nor will it work for other ways of laying out the frame such as
line-interleaving.
Perfectly correct. Actually I've seen a bug when transcoding a 720x480 clip.
In that case, 64-byte alignment is required to make sure luma_pitch = 2 x chroma_pitch (see the worked example at the end of this message).
This problem was preexisting, though, so I guess it isn't necessary to deal
with it in this patch. Not sure what the right answer is - maybe it could just
reject non-matching pitches and return an error? Or it could make a
temporary frame with the stricter alignment and copy to that before
uploading? (Though that might be slow and defeat the point of this upload
path.)
"Return an error" is not user-friendly because they can do nothing after receive such an error message.
Agree with Maxym, I prefer the second option too and there are existing piece of code to handle the cases haven't aligned as libmfx's requirement (though haven't handle the 720x480 yuv420p issue).
But I'm not sure why it will "defeat the point of this upload path". Could you help to give some detail info?
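To spell out that 720x480 case, a standalone check (align_up() is only a throwaway helper for the example):

#include <assert.h>

static int align_up(int x, int a) { return (x + a - 1) & ~(a - 1); }

int main(void)
{
    /* 720x480 YUV420P with 64-byte aligned pitches: the 2:1 relation
     * MSDK's single Pitch field needs holds again. */
    int luma   = align_up(720, 64);   /* 768 */
    int chroma = align_up(360, 64);   /* 384 */

    assert(luma == 2 * chroma);
    return 0;
}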
Li, Zhong
2018-03-29 14:05:32 UTC
Permalink
Ping?
-----Original Message-----
From: Li, Zhong
Sent: Thursday, March 22, 2018 10:42 PM
Subject: [PATCH V2 1/3] lavc/qsvdec: set complete_frame flags for
progressive picture
Set the flag MFX_BITSTREAM_COMPLETE_FRAME when it is a progressive picture.
This can fix vc1 decoding segment fault issues because can't set the start
code correctly.
See: ./avconv -hwaccel qsv -c:v vc1_qsv -i /fate-suite/vc1/SA00040.vc1 -vf
"hwdownload, format=nv12" -f rawvideo /dev/null
v2: fix some h264 interlaced clips regression a. field_order of some h264
interlaced video (e.g: cama3_vtc_b.avc) is marked as AV_FIELD_UNKNOWN
in h264_parser.c. This is not a completed frames.
So only set the MFX_BITSTREAM_COMPLETE_FRAME when it is
progressive.
b. some clips have both progressive and interlaced frames
(e.g.CAPAMA3_Sand_F.264),
the parsed field_order maybe changed druing the decoding progress.
This patch has been verified for other codecs(mpeg2/hevc/vp8).
---
libavcodec/qsvdec.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/libavcodec/qsvdec.c b/libavcodec/qsvdec.c index
c74ec68..8148beb 100644
--- a/libavcodec/qsvdec.c
+++ b/libavcodec/qsvdec.c
@@ -321,6 +321,8 @@ static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
bs.DataLength = avpkt->size;
bs.MaxLength = bs.DataLength;
bs.TimeStamp = avpkt->pts;
+ if (avctx->field_order == AV_FIELD_PROGRESSIVE)
+ bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
}
sync = av_mallocz(sizeof(*sync));
@@ -509,6 +511,7 @@ int ff_qsv_process_data(AVCodecContext *avctx, QSVContext *q,
pkt->data, pkt->size, pkt->pts, pkt->dts,
pkt->pos);
+ avctx->field_order = q->parser->field_order;
/* TODO: flush delayed frames on reinit */
if (q->parser->format != q->orig_pix_fmt ||
*q,
avctx->height = q->parser->height;
avctx->coded_width = q->parser->coded_width;
avctx->coded_height = q->parser->coded_height;
- avctx->field_order = q->parser->field_order;
avctx->level = q->avctx_internal->level;
avctx->profile = q->avctx_internal->profile;
--
1.8.3.1