I have three FireWire cameras that can synchronize automatically via hardware, and I'm trying to capture frames, save them to my hard disk, and display them in a window.
Everything works fine, but at the moment I'm only able to display the frames in three separate windows at the same time.
I'd like to display the frames in a single window, but I don't know how to do it. At this link there is a cvShowManyImages() function, but it expects all the frames to be passed in a single call, whereas my for() loop handles one frame at a time.
This is the code I'm using:
for ( int j = 0; j < k_numImages; j++ )
{
    // Display the timestamps for all cameras to show that the image
    // capture is synchronized for each image
    for ( unsigned int i = 0; i < numCameras; i++ )
    {
        Image image;
        error = ppCameras[i]->RetrieveBuffer( &image );
        if (error != PGRERROR_OK)
        {
            PrintError( error );
            return -1;
        }

        IplImage* destImage = ConvertImageToOpenCV(&image);

        char titolo[50];
        sprintf(titolo, "titolo%d", i);

        cvShowImage(titolo, destImage);
        waitKey(1);
    }
}
It works well, but it creates a separate window for each camera, whereas I want to display the frames of all the cameras in the same window.
Can you help me, please?
EDIT: this is the ConvertImageToOpenCV() function.
IplImage* ConvertImageToOpenCV(Image* pImage)
{
    IplImage* cvImage = NULL;
    bool bColor = true;
    CvSize mySize;
    mySize.height = pImage->GetRows();
    mySize.width = pImage->GetCols();

    switch ( pImage->GetPixelFormat() )
    {
        case PIXEL_FORMAT_MONO8:
            cvImage = cvCreateImageHeader(mySize, 8, 1);
            cvImage->depth = IPL_DEPTH_8U;
            cvImage->nChannels = 1;
            bColor = false;
            break;
        case PIXEL_FORMAT_411YUV8:
            cvImage = cvCreateImageHeader(mySize, 8, 3);
            cvImage->depth = IPL_DEPTH_8U;
            cvImage->nChannels = 3;
            break;
        case PIXEL_FORMAT_422YUV8:
            cvImage = cvCreateImageHeader(mySize, 8, 3);
            cvImage->depth = IPL_DEPTH_8U;
            cvImage->nChannels = 3;
            break;
        case PIXEL_FORMAT_444YUV8:
            cvImage = cvCreateImageHeader(mySize, 8, 3);
            cvImage->depth = IPL_DEPTH_8U;
            cvImage->nChannels = 3;
            break;
        case PIXEL_FORMAT_RGB8:
            cvImage = cvCreateImageHeader(mySize, 8, 3);
            cvImage->depth = IPL_DEPTH_8U;
            cvImage->nChannels = 3;
            break;
        case PIXEL_FORMAT_MONO16:
            cvImage = cvCreateImageHeader(mySize, 16, 1);
            cvImage->depth = IPL_DEPTH_16U;
            cvImage->nChannels = 1;
            bColor = false;
            break;
        case PIXEL_FORMAT_RGB16:
            cvImage = cvCreateImageHeader(mySize, 16, 3);
            cvImage->depth = IPL_DEPTH_16U;
            cvImage->nChannels = 3;
            break;
        case PIXEL_FORMAT_S_MONO16:
            cvImage = cvCreateImageHeader(mySize, 16, 1);
            cvImage->depth = IPL_DEPTH_16U;
            cvImage->nChannels = 1;
            bColor = false;
            break;
        case PIXEL_FORMAT_S_RGB16:
            cvImage = cvCreateImageHeader(mySize, 16, 3);
            cvImage->depth = IPL_DEPTH_16U;
            cvImage->nChannels = 3;
            break;
        case PIXEL_FORMAT_RAW8:
            cvImage = cvCreateImageHeader(mySize, 8, 3);
            cvImage->depth = IPL_DEPTH_8U;
            cvImage->nChannels = 3;
            break;
        case PIXEL_FORMAT_RAW16:
            cvImage = cvCreateImageHeader(mySize, 8, 3);
            cvImage->depth = IPL_DEPTH_8U;
            cvImage->nChannels = 3;
            break;
        case PIXEL_FORMAT_MONO12:
            printf("Not supported by OpenCV\n");
            return NULL;
        case PIXEL_FORMAT_RAW12:
            printf("Not supported by OpenCV\n");
            return NULL;
        case PIXEL_FORMAT_BGR:
            cvImage = cvCreateImageHeader(mySize, 8, 3);
            cvImage->depth = IPL_DEPTH_8U;
            cvImage->nChannels = 3;
            break;
        case PIXEL_FORMAT_BGRU:
            cvImage = cvCreateImageHeader(mySize, 8, 4);
            cvImage->depth = IPL_DEPTH_8U;
            cvImage->nChannels = 4;
            break;
        case PIXEL_FORMAT_RGBU:
            cvImage = cvCreateImageHeader(mySize, 8, 4);
            cvImage->depth = IPL_DEPTH_8U;
            cvImage->nChannels = 4;
            break;
        default:
            printf("Some error occurred...\n");
            return NULL;
    }
    if (bColor)
    {
        if (!bInitialized)
        {
            colorImage.SetData(new unsigned char[pImage->GetCols() * pImage->GetRows() * 3],
                               pImage->GetCols() * pImage->GetRows() * 3);
            bInitialized = true;
        }

        pImage->Convert(PIXEL_FORMAT_BGR, &colorImage); // needs to be BGR to be saved

        cvImage->width = colorImage.GetCols();
        cvImage->height = colorImage.GetRows();
        cvImage->widthStep = colorImage.GetStride();
        cvImage->origin = 0; // interleaved color channels
        cvImage->imageDataOrigin = (char*)colorImage.GetData(); // DataOrigin and Data same pointer, no ROI
        cvImage->imageData = (char*)colorImage.GetData();
        cvImage->nSize = sizeof(IplImage);
        cvImage->imageSize = cvImage->height * cvImage->widthStep;
    }
    else
    {
        cvImage->imageDataOrigin = (char*)pImage->GetData();
        cvImage->imageData = (char*)pImage->GetData();
        cvImage->widthStep = pImage->GetStride();
        cvImage->nSize = sizeof(IplImage);
        cvImage->imageSize = cvImage->height * cvImage->widthStep;
        // at this point cvImage contains a valid IplImage
    }

    return cvImage;
}
I could not test the code below for obvious reasons, but it illustrates one approach:
The idea is to create an array of IplImage* to store the images retrieved by the cameras, so that after the loop you have access to all three of them and can display them in a single window.
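A minimal sketch of that idea, for illustration only: inside the outer loop over k_numImages, the per-camera loop first buffers every frame and only then displays them. It reuses ppCameras, error, ConvertImageToOpenCV() and PrintError() from the question, and assumes the variadic cvShowManyImages(char* title, int nArgs, ...) signature from the linked implementation.

// Sketch, not tested against real hardware.
IplImage* frames[3] = { NULL, NULL, NULL }; // one slot per camera (three cameras here)
Image rawImages[3];                         // keeps the FlyCapture buffers alive while displaying

for ( unsigned int i = 0; i < numCameras; i++ )
{
    error = ppCameras[i]->RetrieveBuffer( &rawImages[i] );
    if (error != PGRERROR_OK)
    {
        PrintError( error );
        return -1;
    }

    IplImage* header = ConvertImageToOpenCV( &rawImages[i] );
    if (header == NULL)
        return -1;

    // Deep-copy the converted frame so frames[i] stays valid even if
    // ConvertImageToOpenCV() reuses its internal conversion buffer.
    frames[i] = cvCloneImage( header );
    cvReleaseImageHeader( &header );
}

// All three frames are available at this point, so they can share one window.
cvShowManyImages( "cameras", 3, frames[0], frames[1], frames[2] );
cvWaitKey( 1 );

for ( unsigned int i = 0; i < numCameras; i++ )
    cvReleaseImage( &frames[i] );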
EDIT: just to summarize our private chat, the problem is that cvShowManyImages() takes colored (3-channel) images, while his camera was returning grayscale (single-channel) images. The solution was simply to change the implementation of cvShowManyImages() so that it also handles single-channel images.
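For reference, here is a sketch of an equivalent workaround that leaves cvShowManyImages() untouched. The helper below is hypothetical and assumes the frames arrive as IplImage* with either one or three channels:

// Sketch only, not the original edit to cvShowManyImages(): expand a
// grayscale frame to BGR so display code written for 3-channel images works.
IplImage* ToDisplayable(IplImage* src)
{
    if (src->nChannels == 3)
        return src; // already 3-channel, nothing to do

    IplImage* bgr = cvCreateImage(cvGetSize(src), src->depth, 3);
    cvCvtColor(src, bgr, CV_GRAY2BGR); // replicate the single channel into B, G and R
    return bgr; // caller releases this copy with cvReleaseImage()
}

The actual fix from the chat modified cvShowManyImages() itself; converting grayscale frames to BGR before the call achieves the same effect.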