Zhong Li
2018-09-06 09:29:46 UTC
RGB32 (AV_PIX_FMT_BGRA on Intel platforms) may be used as an overlay with alpha blending,
so add AV_PIX_FMT_BGRA format support.
Rename RGB32 to BGRA to make it clearer, as Mark Thompson suggested.
V2: Add P010 format support, otherwise HEVC 10-bit decoding would regress.
Thanks to LinJie for the discovery.
Signed-off-by: Zhong Li <***@intel.com>
Verified-by: Fu, Linjie <***@intel.com>
---
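Note (not part of the commit message): below is a minimal, untested sketch of how an
application could upload a BGRA overlay frame through hwcontext_qsv once this patch is
applied. The helper name upload_bgra_frame is illustrative only; error handling is kept
to early returns for brevity.

/* Sketch: upload a packed BGRA AVFrame into a QSV hwframes context. */
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>
#include <libavutil/pixfmt.h>

static int upload_bgra_frame(AVBufferRef *qsv_device_ref,
                             AVFrame *sw_bgra, AVFrame **hw_out)
{
    AVBufferRef *frames_ref = av_hwframe_ctx_alloc(qsv_device_ref);
    AVHWFramesContext *frames_ctx;
    AVFrame *hw = NULL;
    int ret;

    if (!frames_ref)
        return AVERROR(ENOMEM);

    frames_ctx            = (AVHWFramesContext *)frames_ref->data;
    frames_ctx->format    = AV_PIX_FMT_QSV;
    frames_ctx->sw_format = AV_PIX_FMT_BGRA;   /* enabled by this patch */
    frames_ctx->width     = sw_bgra->width;
    frames_ctx->height    = sw_bgra->height;
    frames_ctx->initial_pool_size = 4;

    ret = av_hwframe_ctx_init(frames_ref);
    if (ret < 0)
        goto fail;

    hw = av_frame_alloc();
    if (!hw) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    ret = av_hwframe_get_buffer(frames_ref, hw, 0);
    if (ret < 0)
        goto fail;

    /* This ends up in qsv_transfer_data_to(), where map_frame_to_surface()
     * now knows how to describe packed BGRA data to the mfxFrameSurface1. */
    ret = av_hwframe_transfer_data(hw, sw_bgra, 0);
    if (ret < 0)
        goto fail;

    *hw_out = hw;
    av_buffer_unref(&frames_ref);
    return 0;

fail:
    av_frame_free(&hw);
    av_buffer_unref(&frames_ref);
    return ret;
}
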
libavfilter/qsvvpp.c | 2 +-
libavutil/hwcontext_qsv.c | 44 ++++++++++++++++++++++++++++++++++----------
2 files changed, 35 insertions(+), 11 deletions(-)
diff --git a/libavfilter/qsvvpp.c b/libavfilter/qsvvpp.c
index 75966b3..535e428 100644
--- a/libavfilter/qsvvpp.c
+++ b/libavfilter/qsvvpp.c
@@ -142,7 +142,7 @@ static int pix_fmt_to_mfx_fourcc(int format)
return MFX_FOURCC_NV12;
case AV_PIX_FMT_YUYV422:
return MFX_FOURCC_YUY2;
- case AV_PIX_FMT_RGB32:
+ case AV_PIX_FMT_BGRA:
return MFX_FOURCC_RGB4;
}
diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c
index b3eb4a3..3ee934e 100644
--- a/libavutil/hwcontext_qsv.c
+++ b/libavutil/hwcontext_qsv.c
@@ -90,6 +90,7 @@ static const struct {
uint32_t fourcc;
} supported_pixel_formats[] = {
{ AV_PIX_FMT_NV12, MFX_FOURCC_NV12 },
+ { AV_PIX_FMT_BGRA, MFX_FOURCC_RGB4 },
{ AV_PIX_FMT_P010, MFX_FOURCC_P010 },
{ AV_PIX_FMT_PAL8, MFX_FOURCC_P8 },
};
@@ -731,6 +732,37 @@ static int qsv_transfer_data_child(AVHWFramesContext *ctx, AVFrame *dst,
return ret;
}
+static int map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface)
+{
+ switch (frame->format) {
+ case AV_PIX_FMT_NV12:
+ case AV_PIX_FMT_P010:
+ surface->Data.Y = frame->data[0];
+ surface->Data.UV = frame->data[1];
+ break;
+
+ case AV_PIX_FMT_YUV420P:
+ surface->Data.Y = frame->data[0];
+ surface->Data.U = frame->data[1];
+ surface->Data.V = frame->data[2];
+ break;
+
+ case AV_PIX_FMT_BGRA:
+ surface->Data.B = frame->data[0];
+ surface->Data.G = frame->data[0] + 1;
+ surface->Data.R = frame->data[0] + 2;
+ surface->Data.A = frame->data[0] + 3;
+ break;
+
+ default:
+ return MFX_ERR_UNSUPPORTED;
+ }
+ surface->Data.Pitch = frame->linesize[0];
+ surface->Data.TimeStamp = frame->pts;
+
+ return 0;
+}
+
static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
const AVFrame *src)
{
@@ -750,11 +782,7 @@ static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
}
out.Info = in->Info;
- out.Data.PitchLow = dst->linesize[0];
- out.Data.Y = dst->data[0];
- out.Data.U = dst->data[1];
- out.Data.V = dst->data[2];
- out.Data.A = dst->data[3];
+ map_frame_to_surface(dst, &out);
do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session_download, in, &out, NULL, &sync);
@@ -797,11 +825,7 @@ static int qsv_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
}
in.Info = out->Info;
- in.Data.PitchLow = src->linesize[0];
- in.Data.Y = src->data[0];
- in.Data.U = src->data[1];
- in.Data.V = src->data[2];
- in.Data.A = src->data[3];
+ map_frame_to_surface(src, &in);
do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session_upload, &in, out, NULL, &sync);
--
2.7.4