1、首先去大恒官网安装软件 安装完成后有以下文件夹 2、配置VS2010的环境 3、编写C++代码

// Standard / SDK headers. The header names inside angle brackets were
// stripped when this article was scraped from HTML; restored here to the
// set the code below actually uses.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include <windows.h>            // Sleep, BYTE
#include <opencv2/opencv.hpp>   // Mat, imshow, imwrite, waitKey

// 相机头文件 — Daheng Galaxy camera SDK
#include "GxIAPI.h"
#include "DxImageProc.h"

using namespace std;
using namespace cv;
using std::vector;
using std::string;
using std::ostringstream;

ostringstream name;          ///< suffix appended to saved image file names

GX_DEV_HANDLE m_hDevice;     ///< device handle

BYTE *m_pBufferRaw;          ///< raw (Bayer) image data

BYTE *m_pBufferRGB;          ///< RGB image data, used for display and for saving bmp images

int64_t m_nImageHeight;      ///< raw image height

int64_t m_nImageWidth;       ///< raw image width

int64_t m_nPayLoadSize;      ///< payload size reported by the camera

int64_t m_nPixelColorFilter; ///< Bayer pattern

Mat test;                    ///< RGB frame filled/shown/saved by the capture callback

Mat gray;                    ///< unused in the code visible here

// Sensor limits and sampling steps ("ChouYang" = 抽样, sub-sampling).
// NOTE(review): "Heigth" is a typo but is kept as-is — other translation
// units may reference this macro name.
#define MaxImageWidth 1292

#define MaxImageHeigth 964

#define WChouYang 1

#define HChouYang 1

// Frame-arrival callback registered via GXRegisterCaptureCallback.
// Converts the raw Bayer frame to RGB24, copies it into the global cv::Mat
// `test`, displays it, and writes it to disk as a BMP.
static void GX_STDC OnFrameCallbackFun(GX_FRAME_CALLBACK_PARAM* pFrame)
{
    if (pFrame->status == 0)   // 0 == frame acquired successfully
    {
        memcpy(m_pBufferRaw, pFrame->pImgBuf, pFrame->nImgSize);

        // Bayer RAW8 -> RGB24 conversion (nearest-neighbour de-mosaic).
        DxRaw8toRGB24(m_pBufferRaw,
                      m_pBufferRGB,
                      (VxUint32)(m_nImageWidth),
                      (VxUint32)(m_nImageHeight),
                      RAW2RGB_NEIGHBOUR,
                      DX_PIXEL_COLOR_FILTER(m_nPixelColorFilter),
                      false);

        // Copy the RGB buffer into the global cv::Mat `test`.
        // (The scraped article had a spam URL here; `test.data` is the
        // intended destination, as the original Chinese comment states:
        // "m_pBufferRGB 赋值给 test".)
        memcpy(test.data, m_pBufferRGB, m_nImageWidth * m_nImageHeight * 3);

        imshow("test", test);
        imwrite("./picturecollection/image" + name.str() + ".bmp", test);
        waitKey(15);
    }
}

/***********相机关闭函数**************/

void cameraoff()

{

GX_STATUS emStatus = GX_STATUS_SUCCESS;

//发送停采命令

emStatus = GXSendCommand(m_hDevice, GX_COMMAND_ACQUISITION_STOP);

//注销采集回调

emStatus = GXUnregisterCaptureCallback(m_hDevice);

cout<<"停止采集"<

//关闭设备

emStatus = GXCloseDevice(m_hDevice);

if (emStatus != GX_STATUS_SUCCESS)

{

// 错误处理

}

cout<<"设备已关闭"<

}

int CameraOpreation()

{

// 初始化库

GX_STATUS emStatus = GX_STATUS_SUCCESS;

emStatus = GXInitLib();

if (emStatus != GX_STATUS_SUCCESS)

{

return 1;

}

//打开相机

GX_OPEN_PARAM openParam;

uint32_t nDeviceNum = 0;

openParam.accessMode = GX_ACCESS_EXCLUSIVE;

openParam.openMode = GX_OPEN_INDEX;

openParam.pszContent = "1";

//打开设备

emStatus = GXOpenDevice(&openParam, &m_hDevice);

cout<<"emStatus="<

//cout<<"GX_STATUS_SUCCESS="<

// 枚举设备列表

emStatus = GXUpdateDeviceList(&nDeviceNum, 1000);

if ((emStatus != GX_STATUS_SUCCESS) || (nDeviceNum <= 0))

{

return 0;

}

//设置采集模式连续采集

emStatus = GXSetEnum(m_hDevice, GX_ENUM_ACQUISITION_MODE, GX_ACQ_MODE_CONTINUOUS);//设置自动采集模式

//

emStatus = GXSetInt(m_hDevice, GX_INT_ACQUISITION_SPEED_LEVEL, 1);

emStatus = GXSetEnum(m_hDevice, GX_ENUM_BALANCE_WHITE_AUTO, GX_BALANCE_WHITE_AUTO_CONTINUOUS);//设置自动白平衡

bool bColorFliter = false;

// 获取图像大小

emStatus = GXGetInt(m_hDevice, GX_INT_PAYLOAD_SIZE, &m_nPayLoadSize);

// 获取宽度

emStatus = GXGetInt(m_hDevice, GX_INT_WIDTH, &m_nImageWidth);

// 获取高度

emStatus = GXGetInt(m_hDevice, GX_INT_HEIGHT, &m_nImageHeight);

test.create(m_nImageHeight, m_nImageWidth, CV_8UC3);

//判断相机是否支持bayer格式

bool m_bColorFilter;

emStatus = GXIsImplemented(m_hDevice, GX_ENUM_PIXEL_COLOR_FILTER, &m_bColorFilter); 判断设备是否支持流通道数据包功能

if (m_bColorFilter)

{

emStatus = GXGetEnum(m_hDevice, GX_ENUM_PIXEL_COLOR_FILTER, &m_nPixelColorFilter);

}

m_pBufferRGB = new BYTE[(size_t)(m_nImageWidth * m_nImageHeight * 3)];// //输出图像BGR数据

if (m_pBufferRGB == NULL)

{

return false;

}

//为存储原始图像数据申请空间

m_pBufferRaw = new BYTE[(size_t)m_nPayLoadSize];

if (m_pBufferRaw == NULL)

{

delete[]m_pBufferRGB;

m_pBufferRGB = NULL;

return false;

}

/*********注册图像处理回调函数 **************/

emStatus = GXRegisterCaptureCallback(m_hDevice, NULL, OnFrameCallbackFun);

/**********发送开采命令**********/

emStatus = GXSendCommand(m_hDevice, GX_COMMAND_ACQUISITION_START);

cout<<"开始采集!"<

}

int Picturecatch()

{

CameraOpreation();

cout<<"get picture!"<

return 0;

}

// 程序入口: start acquisition, let the capture callback run for ~10 s
// (it shows and saves frames), then shut the camera down.
// `void main` is non-standard C++; main must return int.
int main(void)
{
    Picturecatch();
    waitKey(200);
    Sleep(10000);   // give the asynchronous callback time to capture frames
    cameraoff();
    return 0;
}

测试效果如下: 文章来源——公众号:机器人视觉

  在图像处理时经常会用到从摄像头读取图像。OPENCV有提供的方法来实现,非常简单,不用多说。而使用VC++则没有那么容易,下面介绍使用CImage和DirectShow读取摄像头图像,并显示的对话框中。

我用的开发工具是 VS2010。

源代码下载,使用VS2010编译通过。(原文的 CSDN 下载链接 sdlypyzq/4087013 在转载时被替换成了垃圾域名,已不可用。)

?

一、创建一个MFC对话框程序,工程起名为CameraVCTest。

?

二、删除无用的控件和按钮。添加一个图片控件,ID为IDC_PICTURE,并为其添加CStatic类型变量m_picture。添加四个按钮,名称分别为预览,拍照,保存,关闭。ID分别为IDC_VIEW,IDC_TAKEPHOTO,IDC_SAVE,IDC_CLOSE。分别为其添加变量m_view,m_takePhoto,m_save,m_close。双击四个按钮,生成四个响应函数。

?

三、将DirectShow文件夹和CameraDS类的头文件和源文件拷贝到项目源文件夹下。并在项目属性VC++ Directories 添加include Directories,“.\DirectShow”。在Solution Exlporer添加上CameraDS类的头文件和源文件。

?

?

四、在CCameraVCTestDlg 类的头文件中,添加

?

#include "CameraDS.h"
// The angle-bracket header name was stripped by the HTML scrape; the code
// below uses std::vector, so restore <vector>.
#include <vector>

using namespace std;

?

添加公共变量和方法

// Public members of CCameraVCTestDlg.
// The template argument of the vector was stripped by the HTML scrape and
// is restored from usage (cameralist[i].OpenCamera(...)); the `isSave`
// flag — used by the init code and OnBnClickedView but missing from the
// scraped declaration list — is declared here as well.
vector<CCameraDS> cameralist;   // one CCameraDS per detected camera
CImage *m_pImage;               // last frame returned by QueryFrame2()
void ShowPicture(CImage *img, CStatic *pic);
void SaveCard(CImage *img);
int m_cam_count;                // number of attached cameras
bool isBreak;                   // true -> leave the preview loop
bool isSave;                    // true -> save frames while previewing
CString m_savePath;
CString savePath;               // directory used when saving JPEG frames
int m_saveIndex;                // base index for saved file names
int m_fileCount;                // frames saved so far (capped at 100)

?

?

在源文件的初始化函数里添加:

?

// Dialog initialisation: open the last enumerated camera at 640x480 with
// the driver property page suppressed, then arm the buttons for preview.
m_cam_count = CCameraDS::CameraCount();

cameralist.resize(m_cam_count);

// NOTE(review): if no camera is attached, m_cam_count is 0 and the index
// below underflows — confirm a guard exists in the surrounding code.
if(! cameralist[m_cam_count-1].OpenCamera(m_cam_count-1, false, 640,480))

{

return FALSE;

}

this->isBreak = false;

// NOTE(review): `isSave` is used here but does not appear among the
// member declarations quoted above — verify it is declared in the class.
this->isSave = false;

this->m_view.EnableWindow(TRUE);

this->m_takePhoto.EnableWindow(FALSE);

this->m_save.EnableWindow(FALSE);

?

?

添加如下函数

?

void DoEvents1()

{

MSG msg;

if (::PeekMessage(&msg,NULL,0,0,PM_REMOVE))

{

::TranslateMessage(&msg);

::DispatchMessage(&msg);

}

}

// "预览" (preview) button handler: runs the preview loop — pumps UI
// messages, grabs a frame from the camera, paints it into the picture
// control, and (while isSave is set) saves up to 100 frames as JPEGs.
// The loop exits only when OnBnClickedClose sets isBreak.
void CCameraVCTestDlg::OnBnClickedView()

{

// TODO: Add your control notification handler code here
this->isBreak = false;

this->m_view.EnableWindow(FALSE);

this->m_takePhoto.EnableWindow(TRUE);

this->m_save.EnableWindow(FALSE);

// No camera attached — nothing to preview.
if(m_cam_count==0)

{

return ;

}//CImage imgShow;

//imgShow.Create(this->MAX_WIDTH,this->MAX_HEIGHT,24);

while(1)

{

// Keep the UI responsive inside this blocking loop.
DoEvents1();

if(isBreak)

{

break;

}

// Grab the next frame from the last enumerated camera and display it.
this->m_pImage = cameralist[m_cam_count-1].QueryFrame2();

this->ShowPicture(m_pImage,&this->m_picture);

//if(m_fileCount == 100)

//{

// this->isBreak = true;

// this->m_bnInit.EnableWindow(TRUE);

// this->m_bnTake.EnableWindow(FALSE);

// this->m_bnSave.EnableWindow(TRUE);

// break;

//}

// While saving, write frames as <savePath>\<m_fileCount+m_saveIndex>.jpg
// until 100 frames have been stored, then re-arm the buttons.
if(isSave)

{

if(this->m_fileCount<100)

{

//this->m_ctrlProg.SetPos(m_fileCount);

CString path;

path.Format(L"%s\\%d.jpg",savePath,m_fileCount+m_saveIndex);

this->m_pImage->Save(path);

m_fileCount++;

}

else

{

isSave = false;

this->m_view.EnableWindow(FALSE);

this->m_takePhoto.EnableWindow(TRUE);

//this->m_save.EnableWindow(TRUE);

}

}

// ~6-7 fps preview rate.
Sleep(150);

}

}

?

添加视频帧显示函数?

?

// Draws *img into the picture control, letterboxed so the image keeps its
// aspect ratio inside the control's client rectangle.
// NOTE(review): the `pic` parameter is accepted but the code below always
// draws into this->m_picture — confirm whether `pic` should be used.
// NOTE(review): the CDC obtained from GetDC() is never released with
// ReleaseDC(), which leaks a GDI DC on every frame.
void CCameraVCTestDlg::ShowPicture(CImage *img,CStatic *pic)

{

if(img==NULL)

{

return ;

}

int width = img->GetWidth();

int height = img->GetHeight();

CRect picRect;

this->m_picture.GetClientRect(&picRect);

CRect rt(picRect);

CDC* dc = this->m_picture.GetDC();

CBrush *pBrush = CBrush::FromHandle((HBRUSH)GetStockObject(WHITE_BRUSH));

//dc->FillRect(rt, pBrush);

// Image is wider (relative to the control): full width, centred vertically.
if(picRect.Height()*width > height*picRect.Width())

{

CPoint p1(0,(picRect.Height()-(picRect.Width()*height/width))/2);

CPoint p2(picRect.Width(),(picRect.Height() - p1.y));

rt.SetRect(p1,p2);

}

else

{

// Image is taller: full height, centred horizontally.
CPoint p1((picRect.Width()-(picRect.Height()*width/height))/2,0);

CPoint p2(picRect.Width()-p1.x,picRect.Height());

rt.SetRect(p1,p2);

}

// }

// this->ShowMouseCursor(CursorTag);

// HALFTONE gives smoother down-scaling than the default stretch mode.
::SetStretchBltMode(dc->m_hDC,HALFTONE);

img->Draw(dc->m_hDC, rt);

}

?

添加关闭程序

?

// "关闭" (close) button handler: stop the preview loop and close the dialog.
void CCameraVCTestDlg::OnBnClickedClose()

{

this->isBreak = true;   // makes OnBnClickedView's while-loop exit

this->OnClose();

}

?

?

好了,现在对话框上,点击预览按钮,就可以打开摄像头了,若还需要什么功能自己添加吧,如有问题,可以一起交流。欢迎不吝赐教。?

?

?

Camera.h

1 #ifndef POINTER_64 2 3 4 #if !defined(_MAC) && (defined(_M_MRX000) || defined(_M_AMD64) || defined(_M_IA64)) && (_MSC_VER >= 1100) && !(defined(MIDL_PASS) || defined(RC_INVOKED)) 5 #define POINTER_64 __ptr64 6 typedef unsigned __int64 POINTER_64_INT; 7 #if defined(_WIN64) 8 #define POINTER_32 __ptr32 9 #else 10 #define POINTER_32 11 #endif 12 #else 13 #if defined(_MAC) && defined(_MAC_INT_64) 14 #define POINTER_64 __ptr64 15 typedef unsigned __int64 POINTER_64_INT; 16 #else 17 #if (_MSC_VER >= 1300) && !(defined(MIDL_PASS) || defined(RC_INVOKED)) 18 #define POINTER_64 __ptr64 19 #else 20 #define POINTER_64 21 #endif 22 typedef unsigned long POINTER_64_INT; 23 #endif 24 #define POINTER_32 25 #endif 26 27 #endif 28 29 30 31 32 33 #ifndef CCAMERA_H 34 #define CCAMERA_H 35 36 #define WIN32_LEAN_AND_MEAN 37 38 #include 39 #include "qedit.h" 40 #include "dshow.h" 41 #include 42 //#include "cv.h" 43 44 //#include 45 46 #define MYFREEMEDIATYPE(mt) {if ((mt).cbFormat != 0) \ 47 {CoTaskMemFree((PVOID)(mt).pbFormat); \ 48 (mt).cbFormat =0; \ 49 (mt).pbFormat = NULL; \ 50 } \ 51 if ((mt).pUnk != NULL) \ 52 { \ 53 (mt).pUnk->Release(); \ 54 (mt).pUnk = NULL; \ 55 }} 56 57 58 class CCameraDS 59 { 60 private: 61 // IplImage * m_pFrame; 62 CImage m_image; 63 bool m_bConnected; 64 int m_nWidth; 65 int m_nHeight; 66 bool m_bLock; 67 bool m_bChanged; 68 long m_nBufferSize; 69 70 CComPtr m_pGraph; 71 CComPtr m_pDeviceFilter; 72 CComPtr m_pMediaControl; 73 CComPtr m_pSampleGrabberFilter; 74 CComPtr m_pSampleGrabber; 75 CComPtr m_pGrabberInput; 76 CComPtr m_pGrabberOutput; 77 CComPtr m_pCameraOutput; 78 CComPtr m_pMediaEvent; 79 CComPtr m_pNullFilter; 80 CComPtr m_pNullInputPin; 81 82 private: 83 bool BindFilter(int nCamIDX, IBaseFilter **pFilter); 84 void SetCrossBar(); 85 86 public: 87 CCameraDS(); 88 virtual~CCameraDS(); 89 90 //打开摄像头,nCamID指定打开哪个摄像头,取值可以为0,1,2,... 
91 //bDisplayProperties指示是否自动弹出摄像头属性页 92 //nWidth和nHeight设置的摄像头的宽和高,如果摄像头不支持所设定的宽度和高度,则返回false 93 bool CCameraDS::OpenCamera(int nCamID, bool bDisplayProperties=true, int nWidth =320, int nHeight =240); 94 95 //关闭摄像头,析构函数会自动调用这个函数 96 void CloseCamera(); 97 98 //返回摄像头的数目 99 //可以不用创建CCameraDS实例,采用int c=CCameraDS::CameraCount();得到结果。100 staticint CameraCount(); 101 102 //根据摄像头的编号返回摄像头的名字103 //nCamID: 摄像头编号104 //sName: 用于存放摄像头名字的数组105 //nBufferSize: sName的大小106 //可以不用创建CCameraDS实例,采用CCameraDS::CameraName();得到结果。107 staticint CCameraDS::CameraName(int nCamID, char* sName, int nBufferSize);108 109 //返回图像宽度110 int GetWidth(){return m_nWidth;} 111 112 //返回图像高度113 int GetHeight(){return m_nHeight;}114 115 //抓取一帧,返回的IplImage不可手动释放!116 //返回图像数据的为RGB模式的Top-down(第一个字节为左上角像素),即IplImage::origin=0(IPL_ORIGIN_TL)117 // IplImage * QueryFrame();118 CImage* QueryFrame2(); 119 };120 121 #endif

camera.cpp

// ============================================================================
// camera.cpp — CCameraDS implementation (DirectShow capture).
// NOTE(review): this listing was flattened onto a few very long lines by the
// HTML scrape. Template arguments inside <> (e.g. CComPtr<IPin> pEnum),
// reinterpret_cast<VIDEOINFOHEADER*>, loop headers such as
// `for (i = 0; i < InputPinCount; i++)`, and spaces in `return false;` /
// `return true;` / `new byte` were eaten, so it does NOT compile as-is and
// must be restored against the original CameraDS source before use.
// The lines below are kept verbatim; only these comments were added.
// ----------------------------------------------------------------------------
// Construction/destruction, CloseCamera, and the first half of OpenCamera
// (COM init, filter-graph creation, sample-grabber media type):
#include "stdafx.h"#include "CameraDS.h"#pragma comment(lib,"Strmiids.lib") //// Construction/Destruction//CCameraDS::CCameraDS(){ m_bConnected =false; m_nWidth =0; m_nHeight =0; m_bLock =false; m_bChanged =false;// m_pFrame = NULL; m_nBufferSize =0; m_pNullFilter = NULL; m_pMediaEvent = NULL; m_pSampleGrabberFilter = NULL; m_pGraph = NULL; CoInitialize(NULL);}CCameraDS::~CCameraDS(){ CloseCamera(); CoUninitialize();}void CCameraDS::CloseCamera(){ if(m_bConnected) m_pMediaControl->Stop(); m_pGraph = NULL; m_pDeviceFilter = NULL; m_pMediaControl = NULL; m_pSampleGrabberFilter = NULL; m_pSampleGrabber = NULL; m_pGrabberInput = NULL; m_pGrabberOutput = NULL; m_pCameraOutput = NULL; m_pMediaEvent = NULL; m_pNullFilter = NULL; m_pNullInputPin = NULL; //if (m_pFrame) // cvReleaseImage(&m_pFrame);if(!this->m_image.IsNull()) { this->m_image.Destroy(); } m_bConnected =false; m_nWidth =0; m_nHeight =0; m_bLock =false; m_bChanged =false; m_nBufferSize =0;}bool CCameraDS::OpenCamera(int nCamID, bool bDisplayProperties, int nWidth, int nHeight){ HRESULT hr = S_OK; CoInitialize(NULL); // Create the Filter Graph Manager. 
// OpenCamera continued: instantiate graph/grabber/null-renderer filters,
// bind the device filter, enumerate pins, optionally show the property page:
hr = CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC, IID_IGraphBuilder, (void**)&m_pGraph); hr = CoCreateInstance(CLSID_SampleGrabber, NULL, CLSCTX_INPROC_SERVER, IID_IBaseFilter, (LPVOID *)&m_pSampleGrabberFilter); hr = m_pGraph->QueryInterface(IID_IMediaControl, (void**) &m_pMediaControl); hr = m_pGraph->QueryInterface(IID_IMediaEvent, (void**) &m_pMediaEvent); hr = CoCreateInstance(CLSID_NullRenderer, NULL, CLSCTX_INPROC_SERVER, IID_IBaseFilter, (LPVOID*) &m_pNullFilter); hr = m_pGraph->AddFilter(m_pNullFilter, L"NullRenderer"); hr = m_pSampleGrabberFilter->QueryInterface(IID_ISampleGrabber, (void**)&m_pSampleGrabber); AM_MEDIA_TYPE mt; ZeroMemory(&mt, sizeof(AM_MEDIA_TYPE)); mt.majortype = MEDIATYPE_Video; mt.subtype = MEDIASUBTYPE_RGB24; mt.formattype = FORMAT_VideoInfo; hr = m_pSampleGrabber->SetMediaType(&mt); MYFREEMEDIATYPE(mt); m_pGraph->AddFilter(m_pSampleGrabberFilter, L"Grabber"); // Bind Device Filter. We know the device because the id was passed in BindFilter(nCamID, &m_pDeviceFilter); m_pGraph->AddFilter(m_pDeviceFilter, NULL); CComPtr pEnum; m_pDeviceFilter->EnumPins(&pEnum); hr = pEnum->Reset(); hr = pEnum->Next(1, &m_pCameraOutput, NULL); pEnum = NULL; m_pSampleGrabberFilter->EnumPins(&pEnum); pEnum->Reset(); hr = pEnum->Next(1, &m_pGrabberInput, NULL); pEnum = NULL; m_pSampleGrabberFilter->EnumPins(&pEnum); pEnum->Reset(); pEnum->Skip(1); hr = pEnum->Next(1, &m_pGrabberOutput, NULL); pEnum = NULL; m_pNullFilter->EnumPins(&pEnum); pEnum->Reset(); hr = pEnum->Next(1, &m_pNullInputPin, NULL); //SetCrossBar(); if (bDisplayProperties) { CComPtr pPages; HRESULT hr = m_pCameraOutput->QueryInterface(IID_ISpecifyPropertyPages, (void**)&pPages); if (SUCCEEDED(hr)) { PIN_INFO PinInfo; m_pCameraOutput->QueryPinInfo(&PinInfo); CAUUID caGUID; pPages->GetPages(&caGUID); OleCreatePropertyFrame(NULL, 0, 0, L"Property Sheet", 1, (IUnknown **)&(m_pCameraOutput.p), caGUID.cElems, caGUID.pElems, 0, 0, NULL); CoTaskMemFree(caGUID.pElems); 
// OpenCamera continued: set the requested width/height via IAMStreamConfig,
// connect camera -> grabber -> null renderer, read back the negotiated size:
PinInfo.pFilter->Release(); } pPages = NULL; } else { //// 加入由 lWidth和lHeight设置的摄像头的宽和高 的功能,默认320*240 // by flymanbox @2009-01-24//int _Width = nWidth, _Height = nHeight; IAMStreamConfig* iconfig; iconfig = NULL; hr = m_pCameraOutput->QueryInterface(IID_IAMStreamConfig, (void**)&iconfig); AM_MEDIA_TYPE* pmt; if(iconfig->GetFormat(&pmt) !=S_OK) { //printf("GetFormat Failed ! \n");returnfalse; } VIDEOINFOHEADER* phead; if ( pmt->formattype == FORMAT_VideoInfo) { phead=( VIDEOINFOHEADER*)pmt->pbFormat; phead->bmiHeader.biWidth = _Width; phead->bmiHeader.biHeight = _Height; if(( hr=iconfig->SetFormat(pmt)) != S_OK ) { returnfalse; } } iconfig->Release(); iconfig=NULL; MYFREEMEDIATYPE(*pmt); } hr = m_pGraph->Connect(m_pCameraOutput, m_pGrabberInput); hr = m_pGraph->Connect(m_pGrabberOutput, m_pNullInputPin); if (FAILED(hr)) { switch(hr) { case VFW_S_NOPREVIEWPIN : break; case E_FAIL : break; case E_INVALIDARG : break; case E_POINTER : break; } } m_pSampleGrabber->SetBufferSamples(TRUE); m_pSampleGrabber->SetOneShot(TRUE); hr = m_pSampleGrabber->GetConnectedMediaType(&mt); if(FAILED(hr)) returnfalse; VIDEOINFOHEADER *videoHeader; videoHeader = reinterpret_cast(mt.pbFormat); m_nWidth = videoHeader->bmiHeader.biWidth; m_nHeight = videoHeader->bmiHeader.biHeight; m_bConnected =true; pEnum = NULL; returntrue;}

// BindFilter (look up the nCamID-th video capture moniker) and SetCrossBar
// (route the crossbar's composite video input to the video decoder output):
bool CCameraDS::BindFilter(int nCamID, IBaseFilter **pFilter){ if (nCamID <0) returnfalse; // enumerate all video capture devices CComPtr pCreateDevEnum; HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER, IID_ICreateDevEnum, (void**)&pCreateDevEnum); if (hr != NOERROR) { returnfalse; } CComPtr pEm; hr = pCreateDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory, &pEm, 0); if (hr != NOERROR) { returnfalse; } pEm->Reset(); ULONG cFetched; IMoniker *pM; int index =0; while(hr = pEm->Next(1, &pM, &cFetched), hr==S_OK, index <= nCamID) { IPropertyBag *pBag; hr = pM->BindToStorage(0, 0, IID_IPropertyBag, (void**)&pBag); if(SUCCEEDED(hr)) { VARIANT var; var.vt = VT_BSTR; hr = pBag->Read(L"FriendlyName", &var, NULL); if (hr == NOERROR) { if (index == nCamID) { pM->BindToObject(0, 0, IID_IBaseFilter, (void**)pFilter); } SysFreeString(var.bstrVal); } pBag->Release(); } pM->Release(); index++; } pCreateDevEnum = NULL; returntrue;}//将输入crossbar变成PhysConn_Video_Compositevoid CCameraDS::SetCrossBar(){ int i; IAMCrossbar *pXBar1 = NULL; ICaptureGraphBuilder2 *pBuilder = NULL; HRESULT hr = CoCreateInstance(CLSID_CaptureGraphBuilder2, NULL, CLSCTX_INPROC_SERVER, IID_ICaptureGraphBuilder2, (void**)&pBuilder); if (SUCCEEDED(hr)) { hr = pBuilder->SetFiltergraph(m_pGraph); } hr = pBuilder->FindInterface(&LOOK_UPSTREAM_ONLY, NULL, m_pDeviceFilter,IID_IAMCrossbar, (void**)&pXBar1); if (SUCCEEDED(hr)) { long OutputPinCount; long InputPinCount; long PinIndexRelated; long PhysicalType; long inPort =0; long outPort =0; pXBar1->get_PinCounts(&OutputPinCount,&InputPinCount); for( i =0;iget_CrossbarPinInfo(TRUE,i,&PinIndexRelated,&PhysicalType); if(PhysConn_Video_Composite==PhysicalType) { inPort = i; break; } } for( i =0;iget_CrossbarPinInfo(FALSE,i,&PinIndexRelated,&PhysicalType); if(PhysConn_Video_VideoDecoder==PhysicalType) { outPort = i; break; } } if(S_OK==pXBar1->CanRoute(outPort,inPort)) { pXBar1->Route(outPort,inPort); } pXBar1->Release(); } 
// QueryFrame2 (grab one RGB24 frame, flip it row-by-row into m_image) and
// the start of CameraCount (enumerate capture devices):
pBuilder->Release();}/*The returned image can not be released.//*///IplImage* CCameraDS::QueryFrame()//{//// long evCode;// long size = 0;//// m_pMediaControl->Run();// m_pMediaEvent->WaitForCompletion(INFINITE, &evCode);// // m_pSampleGrabber->GetCurrentBuffer(&size, NULL);//////if the buffer size changed// if (size != m_nBufferSize)// {// if (m_pFrame)// cvReleaseImage(&m_pFrame);//// m_nBufferSize = size;// m_pFrame = cvCreateImage(cvSize(m_nWidth, m_nHeight), IPL_DEPTH_8U, 3);// }// if (m_pFrame == NULL) return NULL;// m_pSampleGrabber->GetCurrentBuffer(&m_nBufferSize, (long*)m_pFrame->imageData);// cvFlip(m_pFrame);//// return m_pFrame;//}CImage* CCameraDS::QueryFrame2(){ /*if(!m_image.IsNull()) { m_image.Destroy(); }*/ long evCode; long size =0; m_pMediaControl->Run(); m_pMediaEvent->WaitForCompletion(INFINITE, &evCode); m_pSampleGrabber->GetCurrentBuffer(&size, NULL); //if the buffer size changedif (size != m_nBufferSize) { //if (m_pImage != NULL )//|| !m_pImage->IsNull()) //{ // m_pImage->Destroy(); //} // cvReleaseImage(&m_pFrame); m_nBufferSize = size; //m_pFrame = cvCreateImage(cvSize(m_nWidth, m_nHeight), IPL_DEPTH_8U, 3); m_image.Create(m_nWidth,m_nHeight,24); } //if (m_pFrame == NULL) return NULL;if(m_image.IsNull()) { return0; } byte*q; byte*p =newbyte[m_nWidth*m_nHeight*3]; //m_pSampleGrabber->GetCurrentBuffer(&m_nBufferSize, (long*)m_pFrame->imageData); m_pSampleGrabber->GetCurrentBuffer(&m_nBufferSize, (long*)p);// cvFlip(m_pFrame); //for(int y=0, z=m_nHeight-1; y=0; y++,z--) { q = (byte*)m_image.GetPixelAddress(0,z); memcpy(q,&p[m_nWidth*3*y],m_nWidth*3); } delete []p; return&m_image;}int CCameraDS::CameraCount(){ int count =0; CoInitialize(NULL); // enumerate all video capture devices CComPtr pCreateDevEnum; HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER, IID_ICreateDevEnum, (void**)&pCreateDevEnum); CComPtr pEm; hr = pCreateDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory, &pEm, 0); if (hr != 
// CameraCount tail and CameraName (read the FriendlyName of camera nCamID):
NOERROR) { return count; } pEm->Reset(); ULONG cFetched; IMoniker *pM; while(hr = pEm->Next(1, &pM, &cFetched), hr==S_OK) { count++; } pCreateDevEnum = NULL; pEm = NULL; return count;}int CCameraDS::CameraName(int nCamID, char* sName, int nBufferSize){ int count =0; CoInitialize(NULL); // enumerate all video capture devices CComPtr pCreateDevEnum; HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER, IID_ICreateDevEnum, (void**)&pCreateDevEnum); CComPtr pEm; hr = pCreateDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory, &pEm, 0); if (hr != NOERROR) return0; pEm->Reset(); ULONG cFetched; IMoniker *pM; while(hr = pEm->Next(1, &pM, &cFetched), hr==S_OK) { if (count == nCamID) { IPropertyBag *pBag=0; hr = pM->BindToStorage(0, 0, IID_IPropertyBag, (void**)&pBag); if(SUCCEEDED(hr)) { VARIANT var; var.vt = VT_BSTR; hr = pBag->Read(L"FriendlyName", &var, NULL); //还有其他属性,像描述信息等等...if(hr == NOERROR) { //获取设备名称 WideCharToMultiByte(CP_ACP,0,var.bstrVal,-1,sName, nBufferSize ,"",NULL); SysFreeString(var.bstrVal); } pBag->Release(); } pM->Release(); break; } count++; } pCreateDevEnum = NULL; pEm = NULL; return1;}

CImage读取8位灰度图像数据

今天收到了一个任务,就是将4张256X256的小图拼成一个大图,然后再缩小成256X256的小图,图像均是8位的灰度图。网上查了一下资料,决定采用CImage类来实现。

首先利用CImage的Load函数读取磁盘上的4个图像文件。然后创建一个512X512的图像,利用GetBits函数获取图像数据指针将前四个图像的数据复制到创建图像的指定位置,最后调用Save函数保存图像,看看效果。

大致代码如下:

// 拼图: load four 256x256 8-bit tiles and blit them into the four
// quadrants of a 512x512 image, then save the result.
CImage imgs[4];

imgs[0].Load(file0);

imgs[1].Load(file1);

imgs[2].Load(file2);

imgs[3].Load(file3);

CImage image;

// NOTE(review): CImage::Create normally also takes a bits-per-pixel
// argument (e.g. Create(512, 512, 8)) — confirm against the real project.
image.Create(512, 512);

BYTE* bits = image.GetBits();

// Quadrants: top-left, top-right, bottom-left, bottom-right.
SetRectBits(image.GetBits(), 0, 0, 256, 256, imgs[0].GetBits());

SetRectBits(image.GetBits(), 256, 0, 256, 256, imgs[1].GetBits());

SetRectBits(image.GetBits(), 0, 256, 256, 256, imgs[2].GetBits());

SetRectBits(image.GetBits(), 256, 256, 256, 256, imgs[3].GetBits());

image.Save(file);

// 将数据复制到目标的矩形区域内, x, y 是矩形的左上角坐标, width是矩形的宽, height是矩形的高

// 将数据复制到目标的矩形区域内: copy a width x height 8-bit tile from
// `src` into the destination image `dst` at top-left corner (x, y).
// dstPitch is the destination row stride in bytes; it defaults to 512
// (the big image's width), replacing the undeclared free variable
// `srcWidth` the original relied on.
// NOTE(review): the original named the destination parameter `src` and the
// tile `dst`, then memcpy'd INTO the tile at destination-image offsets —
// which is exactly the out-of-bounds write the article's crash describes.
// The roles are corrected below; argument order is unchanged, so the
// existing calls (big image first, tile last) now do what they intended.
void SetRectBits(BYTE* dst, int x, int y, int width, int height, BYTE* src, int dstPitch = 512)
{
    for (int i = y; i < y + height; ++i)
    {
        // Row (i - y) of the tile goes to row i of the destination.
        memcpy(dst + dstPitch * i + x, src + (i - y) * width, width);
    }
}

程序运行中断,中断在memcpy上,目标不可写。猜测问题出现在获取图像数据指针上。查找资料后发现,GetBits获取的指针并不一定是图像的开始位置,必须根据GetPitch来判断,如果返回值为正,则GetBits返回的指针指向图像数据的开头,如果为负,则指向图像数据最后一行的开头。

BYTE* bits = (BYTE*)image.GetBits() + (image.GetPitch()*(image.GetHeight() - 1));

再次运行程序,运行正常,但打开生成的图像发现图像全黑。调试程序,查看image的内存内容,图像数据确实写进去了,所以猜测问题出现在CImage图像数据的表示上。再次查资料,发现8位图像时,CImage图像数据是调色板的索引值。问题就出现在调色板上,用调试器查看image的调色板数据,发现均为0,调色板未设置,所以需要设置调色板的值。

// 设置灰度调色板: an 8-bit CImage stores palette indices, so build an
// identity greyscale palette (index i -> RGB(i, i, i)).
RGBQUAD colors[256];

image.GetColorTable(0, image.GetMaxColorTableEntries(), colors);

for (int i = 0; i < 256; i++)

{

colors[i].rgbBlue = (BYTE)i;

colors[i].rgbGreen = (BYTE)i;

colors[i].rgbRed = (BYTE)i;

colors[i].rgbReserved = 0;

}

image.SetColorTable(0, image.GetMaxColorTableEntries(), colors);

再次编译运行程序,然后打开生成的图像,显示正常,问题解决。

接下来就是图像的缩小了。 采用的是双线性插值算法。就是根据目标图像像素的x0,y0坐标计算从其相对于源图像中的x1,y1坐标,得到的坐标值会是个浮点数,在获取这个x,y坐标周围的4个像素值,由这4个像素到x1,y1的距离作权值来计算加权平均值,最后复制给x0,y0坐标的像素,循环上述操作,直至目标图像所有像素都填充完。大致代码如下:

// ---- 双线性插值缩小: shrink `image` (srcWidth x srcHeight, 8-bit) into a
// 256x256 CImage with bilinear interpolation. ----
// Fixes vs. the original article code:
//  * image.m_GetBits() was a typo for image.GetBits();
//  * `width`/`height` were used but never declared (destination size);
//  * the four bilinear weights were diagonally swapped (f1 = fx*fy was
//    applied to the TOP-LEFT neighbour instead of the bottom-right);
//  * `*((dst + i * width + j) = ...` had an unbalanced parenthesis.
CImage dstImage;
dstImage.Create(256, 256, 8);

/* 设置调色板略 — greyscale palette setup omitted (same as above) */

const int width = 256;    // destination size
const int height = 256;
BYTE* dst = dstImage.GetBits();
int srcWidth = image.GetWidth();
int srcHeight = image.GetHeight();
// assumes GetPitch() > 0 so GetBits() points at the first row, and that
// the pitch equals srcWidth — TODO confirm (see the GetPitch note above)
BYTE* src = (BYTE*)image.GetBits();

for (int i = 0; i < height; i++)
{
    for (int j = 0; j < width; ++j)
    {
        // Position of destination pixel (j, i) in source coordinates.
        float x0 = (float)j * (float)srcWidth / (float)width;
        float y0 = (float)i * (float)srcHeight / (float)height;
        int x1 = (int)x0;                              // top-left neighbour
        int y1 = (int)y0;
        int x2 = (x1 + 1 < srcWidth) ? x1 + 1 : x1;    // right (clamped at border)
        int y2 = y1;
        int x3 = x1;                                   // below (clamped at border)
        int y3 = (y1 + 1 < srcHeight) ? y1 + 1 : y1;
        int x4 = x2;                                   // bottom-right
        int y4 = y3;

        // Fractional offsets inside the 2x2 neighbourhood.
        float fx = x0 - (float)x1;
        float fy = y0 - (float)y1;
        float f1 = (1 - fx) * (1 - fy);                // weight of (x1, y1)
        float f2 = fx * (1 - fy);                      // weight of (x2, y2)
        float f3 = (1 - fx) * fy;                      // weight of (x3, y3)
        float f4 = fx * fy;                            // weight of (x4, y4)

        *(dst + i * width + j) = (BYTE)(
            ((float)*(src + y1 * srcWidth + x1)) * f1 +
            ((float)*(src + y2 * srcWidth + x2)) * f2 +
            ((float)*(src + y3 * srcWidth + x3)) * f3 +
            ((float)*(src + y4 * srcWidth + x4)) * f4);
    }
}

dstImage.Save(dstFile);

任务解决。

后来发现图像是按整数比例缩小的,这种情况下双线性插值就退化成了最近邻插值,结果图像严重失真,所以直接取周围4个像素的平均值就可以了:

// 取周围 4 个像素的平均值。 Fixed vs. the original: it summed the POINTERS
// (src + offset) instead of the pixel VALUES, and had an unbalanced
// opening parenthesis on the left-hand side.
*(dst + i * width + j) = (BYTE)((*(src + y1 * srcWidth + x1) +
                                 *(src + y2 * srcWidth + x2) +
                                 *(src + y3 * srcWidth + x3) +
                                 *(src + y4 * srcWidth + x4)) / 4);

#vs2010保存相机取到的图片代码_使用VC和DirectShow从摄像头中读取图像(一)

0条大神的评论

发表评论