We are developing a POS app using Xamarin.Forms, and we need to print receipts to an ESC/POS thermal printer connected via LAN.
The app has multi-language support, and printing multiple languages with ESC/POS commands by switching the code page works perfectly.
However, it only works for some of the supported languages; for the others the printer outputs garbage (unreadable characters).
So we thought of creating a PDF of the receipt and printing that instead. We tried creating the PDF, converting it to a bitmap, and sending it to the printer with ESC/POS commands, but nothing is printed.
public BitImage(String filename)
{
    Java.IO.File file = new Java.IO.File(filename);
    var pdfRenderer = new PdfRenderer(ParcelFileDescriptor.Open(file, ParcelFileMode.ReadOnly));
    PdfRenderer.Page page = pdfRenderer.OpenPage(0);
    Bitmap bmp = Bitmap.CreateBitmap(page.Width, page.Height, Bitmap.Config.Argb8888);
    page.Render(bmp, null, null, PdfRenderMode.ForPrint);
    load(bmp);
}

private void load(Bitmap bmp)
{
    int w = bmp.Width;
    int h = bmp.Height;
    int bw = (w + 7) / 8;
    if (bw > 255)
        bw = 255;
    int bh = h / 8;
    if (bh > 24)
    {
        bh = 24;
    }
    initData(bw * 8, bh * 8);
    for (int y = 0; y < h; y++)
    {
        for (int x = 0; x < w; x++)
        {
            if (bmp.GetPixel(x, y) == Color.Black)
                setPixel(x, y);
        }
    }
}

private void initData(int w, int h)
{
    width = w;
    height = h;
    pitch = h / 8;
    data = new byte[w * pitch];
}

private void setPixel(int x, int y)
{
    if (x >= width || y >= height)
    {
        return;
    }
    int mask = (0x0080 >> (y % 8));
    data[(x * pitch) + (y / 8)] |= (byte)mask;
}

public void PrintData()
{
    byte[] CMD_INIT = { 0x1B, 0x40 };
    byte[] CMD_UPLOAD_IMAGE = { 0x1D, 0x2A, 0, 0 };
    byte[] CMD_PRINT_IMAGE = { 0x1D, 0x2F, 0 };
    byte[] CMD_CUT = { 0x1D, 0x56, 0x01 };

    CMD_UPLOAD_IMAGE[2] = (byte)(width / 8);
    CMD_UPLOAD_IMAGE[3] = (byte)(height / 8);

    #region Print Via Lan
    Socket pSocket = new Socket(SocketType.Stream, ProtocolType.IP);
    pSocket.SendTimeout = 1500;
    pSocket.Connect("192.168.15.168", 9100);

    pSocket.Send(CMD_INIT);
    pSocket.Send(CMD_UPLOAD_IMAGE);
    pSocket.Send(data);
    pSocket.Send(CMD_PRINT_IMAGE);
    pSocket.Send(CMD_CUT);

    pSocket.Close();
    #endregion
}
Please help me: am I doing this the correct way?
Or is there a better way to achieve the same thing?
You can use a library like SkiaSharp to render an image/PDF of your data in any language and print it properly on any printer.
I've created a sample that demonstrates how to print images properly with ESC/POS printers in C#: GitHub code repo
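For reference, hand-packing a monochrome bitmap for an ESC/POS printer usually follows the pattern below. This is a minimal C++ sketch of the widely supported GS v 0 ("print raster bit image") command, not code taken from the linked repo; isBlack is a hypothetical pixel test, and width and height are in dots.

#include <cstdint>
#include <vector>

// Hedged sketch: pack a monochrome image into an ESC/POS "GS v 0" raster
// bit-image command. `isBlack(x, y)` is a hypothetical callback that returns
// true for dark pixels.
std::vector<uint8_t> buildRasterCommand(int width, int height,
                                        bool (*isBlack)(int x, int y))
{
    const int bytesPerRow = (width + 7) / 8;
    std::vector<uint8_t> cmd = {
        0x1D, 0x76, 0x30, 0x00,                          // GS v 0, mode 0
        static_cast<uint8_t>(bytesPerRow & 0xFF),        // xL (bytes per row)
        static_cast<uint8_t>((bytesPerRow >> 8) & 0xFF), // xH
        static_cast<uint8_t>(height & 0xFF),             // yL (rows of dots)
        static_cast<uint8_t>((height >> 8) & 0xFF)       // yH
    };
    for (int y = 0; y < height; ++y) {
        for (int xb = 0; xb < bytesPerRow; ++xb) {
            uint8_t b = 0;
            for (int bit = 0; bit < 8; ++bit) {
                int x = xb * 8 + bit;
                if (x < width && isBlack(x, y))
                    b |= 0x80 >> bit;                    // MSB = leftmost dot
            }
            cmd.push_back(b);
        }
    }
    return cmd;                                          // send after ESC @ (init)
}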
I am a newbie with the Point Cloud Library, and I currently have the problem described in the title. I had only been debugging in debug mode before, but now I need to produce a release build, so I'm debugging in release mode for the first time. I get an error when I load the point cloud using the code below:
viewer->removeAllPointClouds();
viewer->removeAllShapes();
viewer->addPointCloud<pcl::PointXYZRGB>(temcloud_ptr,"point cloud");
viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "cloud");
viewer->resetCamera();
At the line viewer->addPointCloud<pcl::PointXYZRGB>(temcloud_ptr, "point cloud"); this error occurs: "Exception raised: read access violation. this was 0xFFFFFFFFFFFFFFDF." Execution stops at the following code in xstring:
_CONSTEXPR20_CONTAINER bool _Large_string_engaged() const noexcept {
#ifdef __cpp_lib_constexpr_string
    if (_STD is_constant_evaluated()) {
        return true;
    }
#endif // __cpp_lib_constexpr_string
    return _BUF_SIZE <= _Myres;
}
Here's my code
MainWindow::MainWindow(QWidget *parent)
    : QMainWindow(parent)
{
    ui.setupUi(this);

    auto renderer = vtkSmartPointer<vtkRenderer>::New();
    auto renderWindow = vtkSmartPointer<vtkGenericOpenGLRenderWindow>::New();
    renderWindow->AddRenderer(renderer);

    viewer.reset(new pcl::visualization::PCLVisualizer(renderer,
                                                       renderWindow,
                                                       "viewer",
                                                       false));
    ui.qvtkwidget->setRenderWindow(viewer->getRenderWindow());
    viewer->setupInteractor(ui.qvtkwidget->interactor(),
                            ui.qvtkwidget->renderWindow());
    viewer->setBackgroundColor(0.8941176, 1, 0.9137254);

    cloud_ptr.reset(new pcl::PointCloud<PointXYZRGBI>);
    cloudrgb_ptr.reset(new pcl::PointCloud<pcl::PointXYZRGB>);
    cloudi_ptr.reset(new pcl::PointCloud<pcl::PointXYZI>);
}

void MainWindow::openFile()
{
    QString FileName = QFileDialog::getOpenFileName(this,
        tr("Open Resource"),
        ".",
        tr("File1(*.las);;File2(*.txt);;File3(*.pcd);;File4(*.ply);;File5(*.obj)"));
    if (FileName.isEmpty())
        return;

    const std::string sstr = FileName.toLocal8Bit();
    if (FileName != "")
    {
        Openfile of;
        if (FileName.endsWith("las"))
        {
            const char* cc_FileName = sstr.c_str();
            std::ifstream ifs;
            ifs.open(cc_FileName, std::ios::in | std::ios::binary);

            liblas::ReaderFactory f;
            liblas::Reader reader = f.CreateWithStream(ifs);
            liblas::Header const& header = reader.GetHeader();
            int pointsNum = header.GetPointRecordsCount();

            cloudrgb_ptr->width = pointsNum;
            cloudrgb_ptr->height = 1;
            cloudrgb_ptr->resize(pointsNum);

            for (int i = 0; i < pointsNum; ++i)
            {
                reader.ReadNextPoint();
                const liblas::Point& p = reader.GetPoint();
                float px = (float)p.GetX();
                float py = (float)p.GetY();
                float pz = (float)p.GetZ();
                float pi = (float)p.GetIntensity();
                uint16_t pr = p.GetColor().GetRed();
                uint16_t pg = p.GetColor().GetGreen();
                uint16_t pb = p.GetColor().GetBlue();
                int r2 = ceil(((float)pr / 65536) * (float)256);
                int g2 = ceil(((float)pg / 65536) * (float)256);
                int b2 = ceil(((float)pb / 65536) * (float)256);
                uint32_t prgb = ((int)r2) << 16 | ((int)g2) << 8 | ((int)b2);
                cloudrgb_ptr->points[i].rgb = *reinterpret_cast<float*>(&prgb);
                (*cloudrgb_ptr)[i].x = px;
                (*cloudrgb_ptr)[i].y = py;
                (*cloudrgb_ptr)[i].z = pz;
            }
            ifs.close();
        }

        viewer->removeAllPointClouds();
        viewer->removeAllShapes();
        viewer->addPointCloud<pcl::PointXYZRGB>(cloudrgb_ptr, "point cloud");
        viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "cloud");
        viewer->resetCamera();
        ui.qvtkwidget->update();
    }
}
At first I thought the string was not being initialized properly, so I tried changing the second parameter of addPointCloud, for example:
const std::string ptcd{ "point cloud" }; const char* arr = "point cloud";
but it had no effect.
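One detail worth noting about the LAS-loading code above: pcl::PointXYZRGB exposes r, g and b members directly, so the manual packing of the colour into a float is not required. A minimal sketch (my own illustration, with a hypothetical helper name makePoint) of filling one point that way:

#include <pcl/point_types.h>
#include <cstdint>

// Hedged sketch: build one pcl::PointXYZRGB from LAS-style values. The 16-bit
// LAS colour channels are scaled down to 8 bit, and the r/g/b members are set
// directly instead of reinterpreting a packed integer as a float.
pcl::PointXYZRGB makePoint(float x, float y, float z,
                           std::uint16_t r16, std::uint16_t g16, std::uint16_t b16)
{
    pcl::PointXYZRGB pt;
    pt.x = x;
    pt.y = y;
    pt.z = z;
    pt.r = static_cast<std::uint8_t>(r16 >> 8);  // 0..65535 -> 0..255
    pt.g = static_cast<std::uint8_t>(g16 >> 8);
    pt.b = static_cast<std::uint8_t>(b16 >> 8);
    return pt;
}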
I just bought an 8x32 lattice board (LED matrix) and I control it with an Arduino. The problem is that with the library I got on GitHub I can only display text, not numbers. How can I do that?
Below are the code for the scrolling text and the part of the library that defines the function used to draw the text.
The Arduino code that produces the scrolling text is here:
#include <HT1632.h>
#include <font_5x4.h>
#include <images.h>

int i = 0;
int wd;
char disp[] = "Hello, how are you?";
int x = 10;

void setup() {
  HT1632.begin(A5, A4, A3);
  wd = HT1632.getTextWidth(disp, FONT_5X4_END, FONT_5X4_HEIGHT);
}

void loop() {
  HT1632.renderTarget(1);
  HT1632.clear();
  HT1632.drawText(disp, OUT_SIZE - i, 2, FONT_5X4, FONT_5X4_END,
                  FONT_5X4_HEIGHT);
  HT1632.render();
  i = (i + 1) % (wd + OUT_SIZE);
  delay(100);
}
The library code that specifies the printing of the text is this:
void HT1632Class::drawText(const char text[], int x, int y, const byte font[],
                           int font_end[], uint8_t font_height,
                           uint8_t gutter_space) {
  int curr_x = x;
  char i = 0;
  char currchar;

  // Check if string is within y-bounds
  if (y + font_height < 0 || y >= COM_SIZE)
    return;

  while (true) {
    if (text[i] == '\0')
      return;

    currchar = text[i] - 32;
    if (currchar >= 65 &&
        currchar <= 90) // If character is lower-case, automatically make it upper-case
      currchar -= 32;   // Make this character uppercase.

    if (currchar < 0 || currchar >= 64) { // If out of bounds, skip
      ++i;
      continue; // Skip this character.
    }

    // Check to see if character is not too far right.
    if (curr_x >= OUT_SIZE)
      break; // Stop rendering - all other characters are no longer within the
             // screen

    // Check to see if character is not too far left.
    int chr_width = getCharWidth(font_end, font_height, currchar);
    if (curr_x + chr_width + gutter_space >= 0) {
      drawImage(font, chr_width, font_height, curr_x, y,
                getCharOffset(font_end, currchar));

      // Draw the gutter space
      for (char j = 0; j < gutter_space; ++j)
        drawImage(font, 1, font_height, curr_x + chr_width + j, y, 0);
    }

    curr_x += chr_width + gutter_space;
    ++i;
  }
}
You need to look at snprintf. It lets you format a string of characters just like printf, so you can turn something like an int into part of a string.
An example:
int hour = 10;
int minutes = 50;
char buffer[60];
int status = snprintf(buffer, 60, "the current time is: %i:%i\n", hour, minutes);
buffer now contains: "the current time is: 10:50" (and several unused characters past the \0).
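Applied to the scrolling-text sketch from the question, that could look roughly like the sketch below (temperature is a hypothetical value; everything else reuses the names from the original code):

#include <HT1632.h>
#include <font_5x4.h>
#include <images.h>
#include <stdio.h>   // snprintf

// Hedged sketch: same scrolling loop as in the question, but the displayed
// text is built from a number with snprintf instead of a fixed string literal.
int i = 0;
int wd;
char disp[32];

void setup() {
  int temperature = 23;                                  // hypothetical value to show
  snprintf(disp, sizeof(disp), "Temp: %d C", temperature);
  HT1632.begin(A5, A4, A3);
  wd = HT1632.getTextWidth(disp, FONT_5X4_END, FONT_5X4_HEIGHT);
}

void loop() {
  HT1632.renderTarget(1);
  HT1632.clear();
  HT1632.drawText(disp, OUT_SIZE - i, 2, FONT_5X4, FONT_5X4_END, FONT_5X4_HEIGHT);
  HT1632.render();
  i = (i + 1) % (wd + OUT_SIZE);
  delay(100);
}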
Help please: with an Arduino Uno I receive a signal from a sensor and plot a graph using Processing 2.2.1, but I need to scale the plot up without losing the proportions. My attempts failed; the proportions fell apart (I tried multiplying the values). Code:
Serial myPort;

int xPos = 1;
int yPos = 100;
float yOld = 0;
float yNew = 0;
float inByte = 0;
int lastS = 0;
PFont f;

void setup () {
  size(1200, 500);
  println(Serial.list());
  myPort = new Serial(this, Serial.list()[0], 9600);
  myPort.bufferUntil('\n');
  background(0xff);
}

void draw () {
  int s = second();
  PFont f = createFont("Arial", 9, false);
  textFont(f, 9);
  fill(0);
  if (s != lastS) {
    stroke(0xcc, 0xcc, 0xcc);
    line(xPos, yPos+10, xPos, yPos+30);
    text(s + " Sec.", xPos+5, yPos+30);
    lastS = s;
  }
}

void mousePressed() {
  save(lastS + "-heart.jpg");
}

void serialEvent (Serial myPort) {
  String inString = myPort.readStringUntil('\n');
  if (inString != null) {
    inString = trim(inString);
    if (inString.equals("!")) {
      stroke(0, 0, 0xff); // blue
      inByte = 1023;
    } else {
      stroke(0xff, 0, 0); // Set stroke to red (R, G, B)
      inByte = float(inString);
    }
    inByte = map(inByte, 0, 1023, 0, height);
    yNew = inByte;
    line(xPos-1, yPos-yOld, xPos, yPos-yNew);
    yOld = yNew;
    if (xPos >= width) {
      xPos = 1;
      yPos += 200;
      if (yPos > height-200) {
        xPos = 1;
        yPos = 100;
        background(0xff);
      }
    } else {
      xPos++;
    }
  }
}
There are multiple ways to scale graphics.
A simple method to try is to scale() the rendering (the drawing coordinate system).
Bear in mind that, currently, the buffer is only cleared when xPos reaches the right-hand side of the screen.
The value from Arduino is mapped to Processing here:
inByte = map(inByte, 0, 1023, 0, height);
yNew = inByte;
You could try changing height in that map() call to a different value, as you see fit.
This, however, scales only the Y value. The X value is incremented here:
xPos++;
You might want to change this increment to a different value that works with the proportion you are trying to maintain between X and Y.
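To keep the proportions intact, the usual trick is to derive both the horizontal step and the vertical range from a single zoom factor. A minimal sketch of that arithmetic (written in C++ for illustration; the zoom value and names are my own, but the same calculation drops straight into the Processing sketch's map() call and xPos increment):

#include <cstdio>

// Hedged illustration: proportional scaling driven by one common zoom factor,
// so the x step and the y range grow together instead of independently.
float mapRange(float v, float inMin, float inMax, float outMin, float outMax)
{
    return outMin + (v - inMin) * (outMax - outMin) / (inMax - inMin);
}

int main()
{
    const float zoom  = 2.0f;                 // hypothetical common scale factor
    const float xStep = 1.0f * zoom;          // horizontal advance per sample
    const float yMax  = 500.0f * zoom;        // plot height in pixels

    float sample = 512.0f;                    // hypothetical ADC reading (0..1023)
    float y = mapRange(sample, 0.0f, 1023.0f, 0.0f, yMax);

    std::printf("xStep = %.1f, y = %.1f\n", xStep, y);
    return 0;
}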
I am using a LIS3MDL magnetometer in my quadcopter project to compensate for gyroscope drift. Unfortunately I'm having problems, probably with the calibration.
I've obtained the max and min values (which, oddly, are 14 bits instead of 16) and calculated the biases like this:
biases[i] = (MAX_VALUES[i]+MIN_VALUES[i])/2;
(where i represents each of the 3 axes).
I've subtracted the biases from the raw values, x = (double)new_x - biases[0]; (etc.), and then wanted to calculate the heading like this:
heading = atan2(x,y);
heading += declinationAngle;
where the declination angle is calculated separately.
The outcome is an angle (converted from radians with heading*(180/M_PI)), and it does change when I rotate the quad around the yaw axis, BUT the value also changes when I rotate it around the roll and pitch axes. I want a stable yaw value that does not change when the object is rotated around the other axes. Maybe some kind of fusion with the accelerometer?
I am not sure where I've made a mistake in my calculations...
Whole class:
class Magnetometer {
    int x = 0, y = 0, z = 0;
    LIS3MDL mag;
    int running_min[3] = {32767, 32767, 32767}, running_max[3] = {-32768, -32768, -32768};
    double mag_norm = 0.0;
    double declinationAngle = 0.0;
    double heading = 0.0;
    const int MAX_VALUES[3] = {3014, 3439, 10246};
    const int MIN_VALUES[3] = {-4746, -4110, 492};
    double biases[3] = {0.0};
    double scales[3] = {0.0};
    double avg_scale = 0.0;
    ButterworthDLPF xyz_filter[3];
    double DLPF_ON = true;
    const float sample_rate = MAG_SAMPLE_RATE;
    const float cutoff_freq = 4.0;

  public:
    Magnetometer() {}

    void Init() {
        declinationAngle = (6.0 + (8.0 / 60.0)) / (180.0 / M_PI);
        for (int i = 0; i < 3; i++) {
            biases[i] = (MAX_VALUES[i] + MIN_VALUES[i]) / 2;
            scales[i] = (MAX_VALUES[i] - MIN_VALUES[i]) / 2;
        }
        avg_scale = (scales[0] + scales[1] + scales[2]) / 3.0;
        for (int i = 0; i < 3; i++) scales[i] = avg_scale / scales[i];

        Serial.println("Turning on magnetometer. . .");
        if (!mag.init()) {
            Serial.println("Failed to detect magnetometer!");
            ESP.restart();
        }
        mag.enableDefault();
        //Calibrate();
        for (int i = 0; i < 3; i++) xyz_filter[i].Init(sample_rate, cutoff_freq);
        Serial.println("9DOF readdy!");
    }

    void Calibrate() {
        delay(100);
        while (true) {
            mag.read();
            if (running_max[0] < mag.m.x) running_max[0] = mag.m.x;
            if (running_max[1] < mag.m.y) running_max[1] = mag.m.y;
            if (running_max[2] < mag.m.z) running_max[2] = mag.m.z;
            if (running_min[0] > mag.m.x) running_min[0] = mag.m.x;
            if (running_min[1] > mag.m.y) running_min[1] = mag.m.y;
            if (running_min[2] > mag.m.z) running_min[2] = mag.m.z;
            Serial.println((String)running_max[0] + " " + (String)running_max[1] + " " + (String)running_max[2] + " " + (String)running_min[0] + " " + (String)running_min[1] + " " + (String)running_min[2]);
            delay(20);
        }
    }

    void Update() {
        mag.read();
        xyz_filter[0].Update(mag.m.x);
        xyz_filter[1].Update(mag.m.y);
        xyz_filter[2].Update(mag.m.z);
        //Serial.println(xyz_filter[0].getData());
        /*x = ((double)xyz_filter[0].getData()-biases[0])*scales[0];
        y = ((double)xyz_filter[1].getData()-biases[1])*scales[1];
        z = ((double)xyz_filter[2].getData()-biases[2])*scales[2];*/
        x = ((double)mag.m.x - biases[0]) * scales[0];
        y = ((double)mag.m.y - biases[1]) * scales[1];
        z = ((double)mag.m.z - biases[2]) * scales[2];
        CalculateHeading();
    }

    void CalculateHeading() {
        heading = atan2(y, x);
        heading += declinationAngle;
        //if(heading<0) heading += 2*PI;
        //else if(heading>2*PI) heading -= 2*PI;
        heading = MOD(heading * (180 / M_PI));
    }

    double GetHeading() { return heading; }

    void ShowRawValues(bool names = false) {
        if (names) Serial.print("X: " + (String)x + " Y: " + (String)y + " Z: " + (String)z);
        else Serial.print((String)x + " " + (String)y + " " + (String)z);
    }
};
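On the accelerometer-fusion idea raised at the end of the question: the usual remedy for a heading that shifts with roll and pitch is tilt compensation, i.e. projecting the magnetometer vector onto the horizontal plane using roll and pitch estimated from the accelerometer. A minimal C++ sketch follows; the axis and sign conventions are assumptions and depend on how the sensors are mounted, so they may need adjusting.

#include <cmath>

// Hedged sketch: tilt-compensated heading from calibrated magnetometer
// (mx, my, mz) and accelerometer (ax, ay, az) readings. Axis and sign
// conventions are assumptions and must match the sensor orientation.
double tiltCompensatedHeading(double mx, double my, double mz,
                              double ax, double ay, double az,
                              double declinationAngle)
{
    // Roll and pitch estimated from gravity (accelerometer only), in radians.
    double roll  = std::atan2(ay, az);
    double pitch = std::atan2(-ax, std::sqrt(ay * ay + az * az));

    // Project the magnetic field vector onto the horizontal plane.
    double xh = mx * std::cos(pitch) + mz * std::sin(pitch);
    double yh = mx * std::sin(roll) * std::sin(pitch)
              + my * std::cos(roll)
              - mz * std::sin(roll) * std::cos(pitch);

    double heading = std::atan2(yh, xh) + declinationAngle;

    // Convert to degrees and wrap to 0..360.
    heading *= 180.0 / M_PI;
    if (heading < 0.0)    heading += 360.0;
    if (heading >= 360.0) heading -= 360.0;
    return heading;
}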
I would like to display an image in a Qt window, so I used QLabel->setPixmap(),
but how can I convert an IplImage to a QImage to display it in the label?
I found the following function to do the conversion, but I don't know how to use it in a call statement:
QImage *IplImageToQImage(const IplImage * iplImage, uchar **data, double mini, double maxi)
{
uchar *qImageBuffer = NULL;
int width = iplImage->width;
int widthStep = iplImage->widthStep;
int height = iplImage->height;
switch (iplImage->depth)
{
case IPL_DEPTH_8U:
if (iplImage->nChannels == 1)
{
// OpenCV image is stored with one byte grey pixel. We convert it
// to an 8 bit depth QImage.
//
qImageBuffer = (uchar *) malloc(width*height*sizeof(uchar));
uchar *QImagePtr = qImageBuffer;
const uchar *iplImagePtr = (const uchar *) iplImage->imageData;
for (int y = 0; y < height; y++)
{
// Copy line by line
memcpy(QImagePtr, iplImagePtr, width);
QImagePtr += width;
iplImagePtr += widthStep;
}
}
else if (iplImage->nChannels == 3)
{
/* OpenCV image is stored with 3 byte color pixels (3 channels).
We convert it to a 32 bit depth QImage.
*/
qImageBuffer = (uchar *) malloc(width*height*4*sizeof(uchar));
uchar *QImagePtr = qImageBuffer;
const uchar *iplImagePtr = (const uchar *) iplImage->imageData;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
// We cannot help but copy manually.
QImagePtr[0] = iplImagePtr[0];
QImagePtr[1] = iplImagePtr[1];
QImagePtr[2] = iplImagePtr[2];
QImagePtr[3] = 0;
QImagePtr += 4;
iplImagePtr += 3;
}
iplImagePtr += widthStep-3*width;
}
}
else
{
qDebug("IplImageToQImage: image format is not supported : depth=8U and %d channels\n", iplImage->nChannels);
}
break;
case IPL_DEPTH_16U:
if (iplImage->nChannels == 1)
{
/* OpenCV image is stored with 2 bytes grey pixel. We convert it
to an 8 bit depth QImage.
*/
qImageBuffer = (uchar *) malloc(width*height*sizeof(uchar));
uchar *QImagePtr = qImageBuffer;
//const uint16_t *iplImagePtr = (const uint16_t *);
const unsigned int *iplImagePtr = (const unsigned int *)iplImage->imageData;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
// We take only the highest part of the 16 bit value. It is
//similar to dividing by 256.
*QImagePtr++ = ((*iplImagePtr++) >> 8);
}
iplImagePtr += widthStep/sizeof(unsigned int)-width;
}
}
else
{
qDebug("IplImageToQImage: image format is not supported : depth=16U and %d channels\n", iplImage->nChannels);
}
break;
case IPL_DEPTH_32F:
if (iplImage->nChannels == 1)
{
/* OpenCV image is stored with float (4 bytes) grey pixel. We
convert it to an 8 bit depth QImage.
*/
qImageBuffer = (uchar *) malloc(width*height*sizeof(uchar));
uchar *QImagePtr = qImageBuffer;
const float *iplImagePtr = (const float *) iplImage->imageData;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
uchar p;
float pf = 255 * ((*iplImagePtr++) - mini) / (maxi - mini);
if (pf < 0) p = 0;
else if (pf > 255) p = 255;
else p = (uchar) pf;
*QImagePtr++ = p;
}
iplImagePtr += widthStep/sizeof(float)-width;
}
}
else
{
qDebug("IplImageToQImage: image format is not supported : depth=32F and %d channels\n", iplImage->nChannels);
}
break;
case IPL_DEPTH_64F:
if (iplImage->nChannels == 1)
{
/* OpenCV image is stored with double (8 bytes) grey pixel. We
convert it to an 8 bit depth QImage.
*/
qImageBuffer = (uchar *) malloc(width*height*sizeof(uchar));
uchar *QImagePtr = qImageBuffer;
const double *iplImagePtr = (const double *) iplImage->imageData;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
uchar p;
double pf = 255 * ((*iplImagePtr++) - mini) / (maxi - mini);
if (pf < 0) p = 0;
else if (pf > 255) p = 255;
else p = (uchar) pf;
*QImagePtr++ = p;
}
}
}
else
{
qDebug("IplImageToQImage: image format is not supported : depth=64F and %d channels\n", iplImage->nChannels);
}
break;
default:
qDebug("IplImageToQImage: image format is not supported : depth=%d and %d channels\n", iplImage->depth, iplImage->nChannels);
}
QImage *qImage;
QVector<QRgb> vcolorTable;
if (iplImage->nChannels == 1)
{
// We should check who is going to destroy this allocation.
QRgb *colorTable = new QRgb[256];
for (int i = 0; i < 256; i++)
{
colorTable[i] = qRgb(i, i, i);
vcolorTable[i] = colorTable[i];
}
qImage = new QImage(qImageBuffer, width, height, QImage::Format_Indexed8);
qImage->setColorTable(vcolorTable);
}
else
{
qImage = new QImage(qImageBuffer, width, height, QImage::Format_RGB32);
}
*data = qImageBuffer;
return qImage;
}
The parameters are:
const IplImage * iplImage, uchar **data, double mini, double maxi
What are data, mini, and maxi? How can I get them from my IplImage to use in the call?
Thanks a lot :)
It looks like data is only used as an output parameter (the function stores the buffer it allocates in it), and mini and maxi are used for converting the floating-point values that certain image formats use into integer values in the range 0-255.
I'd pass the address of a uchar* variable for data. mini and maxi really depend on the image data, and I don't know what reasonable ranges are, but if your IplImage is not stored as floating-point values then these values shouldn't make any difference.
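A call could then look like the sketch below (my own usage example, assuming the function above is declared, an existing IplImage*, and a QLabel named label; the include for IplImage depends on your OpenCV version):

#include <QLabel>
#include <QPixmap>
#include <opencv2/core/core_c.h>   // IplImage (legacy C API; adjust to your OpenCV version)

// Hedged usage sketch for IplImageToQImage(): `label` is an existing QLabel.
void showIplImage(QLabel *label, const IplImage *ipl)
{
    uchar *rawBuffer = NULL;       // receives the internally allocated buffer
    QImage *qimg = IplImageToQImage(ipl, &rawBuffer, 0.0, 255.0); // mini/maxi only matter for 32F/64F images
    label->setPixmap(QPixmap::fromImage(*qimg));
    delete qimg;                   // QPixmap made its own copy above
    free(rawBuffer);               // the buffer was allocated with malloc()
}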
You can simply create a QImage whose data is owned by something else (e.g. the IplImage) using the QImage(data, width, height, format) constructor, where data is the IplImage data pointer, as long as the format is the same in both the QImage and the IplImage (e.g. RGB888 = 8UC3).
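A minimal sketch of that approach for an 8UC3 image (my own illustration; note that OpenCV stores pixels as BGR, so rgbSwapped() is used at the end, which does make a copy):

#include <QImage>
#include <opencv2/core/core_c.h>   // IplImage (adjust include to your OpenCV version)

// Hedged sketch: wrap an 8-bit, 3-channel IplImage in a QImage without copying
// the pixel data. The IplImage must stay alive (and unmodified) for as long as
// the wrapping QImage is used.
QImage wrapIplImage(const IplImage *ipl)
{
    // Assumes depth == IPL_DEPTH_8U and nChannels == 3 (i.e. 8UC3).
    QImage view(reinterpret_cast<const uchar *>(ipl->imageData),
                ipl->width, ipl->height,
                ipl->widthStep,                // bytes per line, includes row padding
                QImage::Format_RGB888);
    // OpenCV stores BGR; rgbSwapped() returns a colour-corrected copy.
    return view.rgbSwapped();
}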
I have found some bugs in the code... maybe there are still more, but for now it looks fine to me. A QImage with Format_Indexed8 sometimes needs (depending on the image resolution...) 2 extra bytes added on the right side (I don't know why, but it seems to be like this).
Here is the new, adapted code:
QImage *IplImageToQImage(const IplImage * iplImage, uchar **data, double mini, double maxi)
{
uchar *qImageBuffer = NULL;
int width = iplImage->width;
int widthStep = iplImage->widthStep;
int height = iplImage->height;
QImage *qImage;
switch (iplImage->depth)
{
case IPL_DEPTH_8U:
if (iplImage->nChannels == 1)
{
// OpenCV image is stored with one byte grey pixel. We convert it
// to an 8 bit depth QImage.
qImage = new QImage(width,height,QImage::Format_Indexed8);
uchar *QImagePtr = qImage->scanLine(0);
qImageBuffer = qImage->scanLine(0);
const uchar *iplImagePtr = (const uchar *) iplImage->imageData;
for (int y = 0; y < height; y++)
{
// Copy line by line
QImagePtr = qImage->scanLine(y);
memcpy(QImagePtr, iplImagePtr, width);
iplImagePtr += widthStep;
}
/*
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
// We take only the highest part of the 16 bit value. It is
//similar to dividing by 256.
//*QImagePtr++ = ((*iplImagePtr++) >> 8);
*QImagePtr = *iplImagePtr;
QImagePtr++;
iplImagePtr++;
}
iplImagePtr += widthStep/sizeof(uchar)-width;
}*/
}
else if (iplImage->nChannels == 3)
{
/* OpenCV image is stored with 3 byte color pixels (3 channels).
We convert it to a 32 bit depth QImage.
*/
qImageBuffer = (uchar *) malloc(width*height*4*sizeof(uchar));
uchar *QImagePtr = qImageBuffer;
const uchar *iplImagePtr = (const uchar *) iplImage->imageData;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
// We cannot help but copy manually.
QImagePtr[0] = iplImagePtr[0];
QImagePtr[1] = iplImagePtr[1];
QImagePtr[2] = iplImagePtr[2];
QImagePtr[3] = 0;
QImagePtr += 4;
iplImagePtr += 3;
}
iplImagePtr += widthStep-3*width;
}
}
else
{
qDebug("IplImageToQImage: image format is not supported : depth=8U and %d channels\n", iplImage->nChannels);
}
break;
case IPL_DEPTH_16U:
if (iplImage->nChannels == 1)
{
/* OpenCV image is stored with 2 bytes grey pixel. We convert it
to an 8 bit depth QImage.
*/
qImage = new QImage(width,height,QImage::Format_Indexed8);
uchar *QImagePtr = qImage->scanLine(0);
qImageBuffer = qImage->scanLine(0);
//const uint16_t *iplImagePtr = (const uint16_t *);
const unsigned short *iplImagePtr = (const unsigned short *)iplImage->imageData;
for (int y = 0; y < height; y++)
{
QImagePtr = qImage->scanLine(y);
for (int x = 0; x < width; x++)
{
// We take only the highest part of the 16 bit value. It is
//similar to dividing by 256.
//*QImagePtr++ = ((*iplImagePtr++) >> 8);
//change here 16 bit could be everything !! set max min to your desire
*QImagePtr = 255*(((*iplImagePtr) - mini) / (maxi - mini));
QImagePtr++;
iplImagePtr++;
}
iplImagePtr += widthStep/sizeof(unsigned short)-width;
}
}
else
{
qDebug("IplImageToQImage: image format is not supported : depth=16U and %d channels\n", iplImage->nChannels);
}
break;
case IPL_DEPTH_32F:
if (iplImage->nChannels == 1)
{
/* OpenCV image is stored with float (4 bytes) grey pixel. We
convert it to an 8 bit depth QImage.
*/
qImage = new QImage(width,height,QImage::Format_Indexed8);
uchar *QImagePtr = qImage->scanLine(0);
qImageBuffer = qImage->scanLine(0);
const float *iplImagePtr = (const float *) iplImage->imageData;
for (int y = 0; y < height; y++)
{
QImagePtr = qImage->scanLine(y);
for (int x = 0; x < width; x++)
{
uchar p;
float pf = 255 * ((*iplImagePtr++) - mini) / (maxi - mini);
if (pf < 0) p = 0;
else if (pf > 255) p = 255;
else p = (uchar) pf;
*QImagePtr++ = p;
}
iplImagePtr += widthStep/sizeof(float)-width;
}
}
else
{
qDebug("IplImageToQImage: image format is not supported : depth=32F and %d channels\n", iplImage->nChannels);
}
break;
case IPL_DEPTH_64F:
if (iplImage->nChannels == 1)
{
/* OpenCV image is stored with double (8 bytes) grey pixel. We
convert it to an 8 bit depth QImage.
*/
qImage = new QImage(width,height,QImage::Format_Indexed8);
uchar *QImagePtr = qImage->scanLine(0);
qImageBuffer = qImage->scanLine(0);
const double *iplImagePtr = (const double *) iplImage->imageData;
for (int y = 0; y < height; y++)
{
QImagePtr = qImage->scanLine(y);
for (int x = 0; x < width; x++)
{
uchar p;
double pf = 255 * ((*iplImagePtr++) - mini) / (maxi - mini);
if (pf < 0) p = 0;
else if (pf > 255) p = 255;
else p = (uchar) pf;
*QImagePtr++ = p;
}
}
}
else
{
qDebug("IplImageToQImage: image format is not supported : depth=64F and %d channels\n", iplImage->nChannels);
}
break;
default:
qDebug("IplImageToQImage: image format is not supported : depth=%d and %d channels\n", iplImage->depth, iplImage->nChannels);
}
QVector<QRgb> vcolorTable;
if (iplImage->nChannels == 1)
{
// We should check who is going to destroy this allocation.
vcolorTable.resize(256);
for (int i = 0; i < 256; i++)
{
vcolorTable[i] = qRgb(i, i, i);
}
//Qt vector is difficult to use... start with std to qvector
//here I allocate QImage using qt constructor (Forma_Indexed8 adds sometimes 2 bytes on the right side !!! o.O not specified nowhere !!!)
//qImage = new QImage(tmpImg->scanLine(0), width, height, QImage::Format_Indexed8);
qImage->setColorTable(vcolorTable);
}
else
{
qImage = new QImage(qImageBuffer, width, height, QImage::Format_RGB32);
}
*data = qImageBuffer;
return qImage;
}
I don't know if the 3-channel path has the same bug, but I hope not.
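Those extra bytes are most likely just QImage's scanline alignment: every row is padded to a 32-bit boundary, which is also why writing through scanLine(y), as the adapted code does, handles it automatically. A small sketch that shows the effect (my own illustration; the 318-pixel width is an arbitrary example):

#include <QImage>
#include <QDebug>

// Hedged sketch: bytesPerLine() can be larger than width() for Format_Indexed8
// images whose width is not a multiple of 4, because QImage pads each scanline
// to a 32-bit boundary.
int main()
{
    QImage img(318, 4, QImage::Format_Indexed8);       // 318 is not a multiple of 4
    qDebug() << "width:" << img.width()                // 318
             << "bytesPerLine:" << img.bytesPerLine(); // 320 (padded)
    return 0;
}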