I am developing a UWP C# application for the Hololens2. I am trying to access the front facing camera using MediaCapture and MediaFrameReader. So far everything works fine as long as I keep the resolution at 896x504. As soon as I try to change the resolution to 2272x1278, the image I'm getting is corrupted.
As I am only interested in the gray values, I just copy the luminance values from the software bitmap. With the NV12 format, these should be right at the beginning of the buffer. When I visualize the copied data, I get an image like the one below (here the camera is looking at a white wall). The image shows strange diagonal stripes and nothing is visible. Screenshot
I also tried the YUV2 format and it works fine, but copying the pixel data takes more time.
Am I missing something?
[EDIT] I put a demo application on Github to showcase the problem.
Here are some code snipet I am using to accessing the pixel data:
- init MediaCapture:
string deviceId = await GetDeviceId();
var mediaInitSettings = new MediaCaptureInitializationSettings { VideoDeviceId = deviceId };
IReadOnlyList<MediaCaptureVideoProfile> profiles = MediaCapture.FindAllVideoProfiles(deviceId);
var match = (from profile in profiles
from desc in profile.SupportedRecordMediaDescription
where desc.Subtype == "NV12" && desc.Width == 2272 && desc.Height == 1278 && Math.Abs(desc.FrameRate - 30) < Tolerance
select new { profile, desc}).FirstOrDefault();
if (match != null)
{
mediaInitSettings.VideoProfile = match.profile;
mediaInitSettings.RecordMediaDescription = match.desc;
mediaInitSettings.SharingMode = MediaCaptureSharingMode.ExclusiveControl;
mediaInitSettings.MemoryPreference = MediaCaptureMemoryPreference.Cpu;
}
else
{
mediaInitSettings.VideoProfile = profiles[0];
}
_mediaCapture = new MediaCapture();
await _mediaCapture.InitializeAsync(mediaInitSettings);
- init FrameReader:
const MediaStreamType mediaStreamType = MediaStreamType.VideoRecord;
CameraParameters parameters = new CameraParameters(_cameraProfile);
MediaFrameSource source = _mediaCapture.FrameSources.Values.Single(frameSource => frameSource.Info.MediaStreamType == mediaStreamType);
var preferredFormat = source.SupportedFormats.Where(format =>
format.VideoFormat.Height == parameters.CameraResolutionHeight
&&
format.VideoFormat.Width == parameters.CameraResolutionWidth
&&
Math.Round(format.FrameRate.Numerator / format.FrameRate.Denominator - parameters.FrameRate) < Tolerance
&&
format.Subtype == "NV12");
var selectedFormat = preferredFormat.FirstOrDefault();
await source.SetFormatAsync(selectedFormat);
_frameReader = await _mediaCapture.CreateFrameReaderAsync(source, selectedFormat.Subtype);
_frameReader.FrameArrived += OnFrameArrived;
- copy the pixel data:
private async void OnFrameArrived(MediaFrameReader sender, MediaFrameArrivedEventArgs args)
{
if (sender == null) throw new ArgumentNullException(nameof(sender));
if (args == null) throw new ArgumentNullException(nameof(args));
using (MediaFrameReference mediaFrameReference = sender.TryAcquireLatestFrame())
{
if (mediaFrameReference == null) return;
var videoMediaFrame = mediaFrameReference.VideoMediaFrame;
var softwareBitmap = videoMediaFrame?.SoftwareBitmap;
if (softwareBitmap != null)
{
var width = softwareBitmap.PixelWidth;
var height = softwareBitmap.PixelHeight;
// copy only luminance plane of NV12
byte[] rawPixelData = new byte[width * height];
using (var buffer = softwareBitmap.LockBuffer(BitmapBufferAccessMode.Read))
using (var reference = buffer.CreateReference())
unsafe
{
byte* pixelData;
uint capacity;
((IMemoryBufferByteAccess)reference).GetBuffer(out pixelData, out capacity);
Marshal.Copy((IntPtr)pixelData, rawPixelData, 0, rawPixelData.Length);
}
FrameArrived?.Invoke(this, new FrameArrivedEventArgs(rawPixelData, width, height));
softwareBitmap.Dispose();
}
}
}