Removing Purple Fringing from Photos


Our project is a smart camera, and today's digital cameras all share a common flaw: purple fringing. Along high-contrast boundaries between bright and dark areas, blue-purple lines show up and noticeably spoil the photo's colours. To get rid of them, after digging through a lot of documentation I implemented some processing on the iOS side that removes part of the fringing.

Approach

1. Read the image's pixel data and threshold it on luminance, with 180 as the cut-off: write a white point into a new image where the pixel is brighter, a black point where it is darker; this yields a threshold image.
2. Run an edge-detection pass over the threshold image and output the result to a new image; this gives an edge map of the original's light/dark boundaries.
3. Around every white point (r == 255) in the edge map, draw a filled circle of radius R with white as the fill colour; this turns each edge into a white band 2R wide.

4. Treat the image from step 3 as a mask and loop over it: wherever r == 255 the point is white, so read the RGB value of the corresponding pixel in the original image and convert it to HSL (hue, saturation, lightness). If the hue H falls inside [140, 230] (this range is on a 0~255 scale, while the conversion produces 0~360, so it has to be rescaled; see the snippet right after this list), set that pixel to R = G = B = (R + G + B) / 3, which keeps the brightness roughly unchanged while dropping the saturation, and that removes the purple fringe.
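
As a quick check on the range in step 4, this is the rescaling from the 0~255 hue scale to the 0~360 degrees that RGBtoHSL below produces; the implementation simply hard-codes the rounded results, 197 and 325:

// Rescale the hue bounds from step 4 (0~255 scale) into degrees (0~360)
float lowerHue = 140.0f / 255.0f * 360.0f; // ≈ 197.6°
float upperHue = 230.0f / 255.0f * 360.0f; // ≈ 324.7°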

Implementation

I use a few things from GPUImage here; take or replace them as you see fit.

#import "GPUImage.h" // or <GPUImage/GPUImage.h>, depending on how GPUImage is integrated

// image-processing helpers
#define min3v(v1, v2, v3) ((v1)>(v2)? ((v2)>(v3)?(v3):(v2)):((v1)>(v3)?(v3):(v1)))
#define max3v(v1, v2, v3) ((v1)<(v2)? ((v2)<(v3)?(v3):(v2)):((v1)<(v3)?(v3):(v1)))
#define T_PI (M_PI*2)

typedef struct{
    int red;
    int green;
    int blue;
} COLOR_RGB;

typedef struct{
    float hue;
    float saturation;
    float luminance;
} COLOR_HSL;

typedef struct{
    float c;
    float m;
    float y;
    float k;
} COLOR_CMYK;

typedef struct{
    size_t i;
    size_t j;
} POINT;

// Converts RGB to HSL
void RGBtoHSL(/*[in]*/const COLOR_RGB *rgb, /*[out]*/COLOR_HSL *hsl){
    float h=0, s=0, l=0;
    // normalize red-green-blue values to [0, 1]
    float r = rgb->red/255.f;
    float g = rgb->green/255.f;
    float b = rgb->blue/255.f;
    float maxVal = max3v(r, g, b);
    float minVal = min3v(r, g, b);
    // hue
    if(maxVal == minVal){
        h = 0; // undefined (gray)
    }else if(maxVal == r && g >= b){
        h = 60.0f*(g-b)/(maxVal-minVal);
    }else if(maxVal == r && g < b){
        h = 60.0f*(g-b)/(maxVal-minVal) + 360.0f;
    }else if(maxVal == g){
        h = 60.0f*(b-r)/(maxVal-minVal) + 120.0f;
    }else{ // maxVal == b
        h = 60.0f*(r-g)/(maxVal-minVal) + 240.0f;
    }
    // lightness
    l = (maxVal + minVal)/2.0f;
    // saturation
    if(maxVal == minVal || l == 0){
        s = 0;
    }else if(l <= 0.5f){
        s = (maxVal-minVal)/(maxVal+minVal);
    }else{ // l > 0.5f
        s = (maxVal-minVal)/(2 - (maxVal+minVal));
    }
    hsl->hue = (h>360)? 360 : ((h<0)?0:h);
    hsl->saturation = ((s>1)? 1 : ((s<0)?0:s))*100;
    hsl->luminance = ((l>1)? 1 : ((l<0)?0:l))*100;
}
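
// Quick sanity check (my own addition, not part of the original post):
// a typical purple-fringe colour should land inside the hue window that
// removePurpleSide: tests further down.
static void checkPurpleFringeHue(void){
    COLOR_RGB fringe = {180, 120, 220}; // a bluish purple, like a fringe pixel
    COLOR_HSL hsl;
    RGBtoHSL(&fringe, &hsl);
    // hue comes out around 276 degrees, inside [197, 325], so this pixel
    // would be desaturated by the loop in removePurpleSide:
    NSLog(@"fringe hue = %.1f", hsl.hue);
}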

// Converts HSL to RGB
void HSLtoRGB(const COLOR_HSL *hsl, COLOR_RGB *rgb){
    float h = hsl->hue;                  // h must be [0, 360]
    float s = hsl->saturation/100.f; // s must be [0, 1]
    float l = hsl->luminance/100.f;      // l must be [0, 1]
    float R, G, B;
    if(hsl->saturation == 0){
        // achromatic color (gray scale)
        R = G = B = l*255.0f;
    }else{
        float q = (l<0.5f)?(l * (1.0f+s)):(l+s - (l*s));
        float p = (2.0f * l) - q;
        float Hk = h/360.0f;
        float T[3];
        T[0] = Hk + 0.3333333f; // Tr   0.3333333f=1.0/3.0
        T[1] = Hk;              // Tg
        T[2] = Hk - 0.3333333f; // Tb
        for(int i=0; i<3; i++){
            if(T[i] < 0) T[i] += 1.0f;
            if(T[i] > 1) T[i] -= 1.0f;
            if((T[i]*6) < 1){
                T[i] = p + ((q-p)*6.0f*T[i]);
            }else if((T[i]*2.0f) < 1) //(1.0/6.0)<=T[i] && T[i]<0.5
            {
                T[i] = q;
            }else if((T[i]*3.0f) < 2) // 0.5<=T[i] && T[i]<(2.0/3.0)
            {
                T[i] = p + (q-p) * ((2.0f/3.0f) - T[i]) * 6.0f;
            }
            else T[i] = p;
        }
        R = T[0]*255.0f;
        G = T[1]*255.0f;
        B = T[2]*255.0f;
    }
    
    rgb->red = (int)((R>255)? 255 : ((R<0)?0 : R));
    rgb->green = (int)((G>255)? 255 : ((G<0)?0 : G));
    rgb->blue = (int)((B>255)? 255 : ((B<0)?0 : B));
}

+ (UIImage *)removePurpleSide:(UIImage *)image{
//    UIImage *lightImage = [self luminanceThreshold:image];
    UIImage *newImage = [Tool prewittEdgeDetection:image];
    
    // edge map
    CGImageRef cgimage = [newImage CGImage];
    size_t width = CGImageGetWidth(cgimage); // image width
    size_t height = CGImageGetHeight(cgimage); // image height
    size_t size = width * height * 4;
    unsigned char *data = calloc(size, sizeof(unsigned char)); // buffer for the edge map's pixel data
    size_t bitsPerComponent = 8; // bits per component (r, g, b, a)
    size_t bytesPerRow = width * 4; // bytes per row (each pixel is 4 bytes: r, g, b, a)
    CGColorSpaceRef space = CGColorSpaceCreateDeviceRGB(); // RGB colour space
    CGRect rect = CGRectMake(0, 0, width, height);
    
    CGContextRef context =
    CGBitmapContextCreate(data,
                          width,
                          height,
                          bitsPerComponent,
                          bytesPerRow,
                          space,
                          kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGContextDrawImage(context, rect, cgimage);
    
    
    NSMutableArray *pointArray = [NSMutableArray array];
    for (size_t i = 0; i < height; i++){
        size_t k = i * bytesPerRow;
        for (size_t j = 0; j < width; j++){
            size_t pixelIndex = k + j * 4;
            int red = data[pixelIndex];
//            int green = data[pixelIndex+1];
//            int blue = data[pixelIndex+2];
            if (red == 255) {
                POINT point={i,j};
                NSValue *value = [NSValue value:&point withObjCType:@encode(POINT)];
                [pointArray addObject:value];
            }
        }
    }
    
    // flip the coordinate system: Core Graphics' y axis runs the other way,
    // so without this the circles would be drawn mirrored vertically
    CGContextTranslateCTM(context, 0, height);
    CGContextScaleCTM(context, 1.0, -1.0);
    
    POINT point;

    for (int i = 0; i < pointArray.count; i++) {
        NSValue *pointValue = [pointArray objectAtIndex:i];
        [pointValue getValue:&point];
        // full circle of radius 8 (the R from step 3) centred on the edge point
        CGContextAddArc(context, point.j, point.i, 8, 0, T_PI, YES);
        CGContextSetFillColorWithColor(context,[UIColor whiteColor].CGColor);
        CGContextFillPath(context);
    }
    
    // original image
    CGImageRef cgimage1 = [image CGImage];
    unsigned char *data1 = calloc(size, sizeof(unsigned char)); // buffer for the original image's pixel data
    CGContextRef context1 =
    CGBitmapContextCreate(data1,
                          width,
                          height,
                          bitsPerComponent,
                          bytesPerRow,
                          space,
                          kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGContextDrawImage(context1, rect, cgimage1);

    for (size_t i = 0; i < height; i++){
        size_t k = i * bytesPerRow;
        for (size_t j = 0; j < width; j++){
            size_t pixelIndex = k + j * 4;
            int red = data[pixelIndex];
//            int green = data[pixelIndex+1];
//            int blue = data[pixelIndex+2];
            if (red == 255) {
                int red1 = data1[pixelIndex];
                int green1 = data1[pixelIndex+1];
                int blue1 = data1[pixelIndex+2];
                
                COLOR_RGB old_color = {red1,green1,blue1};
                COLOR_HSL new_color;
                RGBtoHSL(&old_color, &new_color);
                
                if (new_color.hue >= 197 && new_color.hue <= 325) {
                    int color = (red1 + green1 + blue1)/3;
                    data1[pixelIndex] = color;
                    data1[pixelIndex+1] = color;
                    data1[pixelIndex+2] = color;
                }
            }
        }
    }

    CGImageRef resultRef = CGBitmapContextCreateImage(context1);

    CGColorSpaceRelease(space);
    CGContextRelease(context);
    CGContextRelease(context1);
    free(data);
    free(data1);

    UIImage *result = [UIImage imageWithCGImage:resultRef];
    CGImageRelease(resultRef);
    return result;
}


// threshold image
+ (UIImage *)luminanceThreshold:(UIImage *)inputImage{
    // luminance threshold filter
    GPUImageLuminanceThresholdFilter *thresholdFilter = [[GPUImageLuminanceThresholdFilter alloc] init];
    thresholdFilter.threshold = 180.0f/255.0f; // the 180 cut-off from step 1 (the filter expects 0~1)
    [thresholdFilter forceProcessingAtSize:inputImage.size];
    [thresholdFilter useNextFrameForImageCapture];
    // source image
    GPUImagePicture *stillImageSource = [[GPUImagePicture alloc] initWithImage:inputImage];
    // attach the filter
    [stillImageSource addTarget:thresholdFilter];
    // render
    [stillImageSource processImage];
    // read the result back out
    return [thresholdFilter imageFromCurrentFramebuffer];
}

// edge-detection pass
+ (UIImage *)prewittEdgeDetection:(UIImage *)image{
    GPUImagePrewittEdgeDetectionFilter *disFilter = [[GPUImagePrewittEdgeDetectionFilter alloc] init];
    // process at the image's own size
    [disFilter forceProcessingAtSize:image.size];
    [disFilter useNextFrameForImageCapture];
    // source image
    GPUImagePicture *stillImageSource = [[GPUImagePicture alloc] initWithImage:image];
    // attach the filter
    [stillImageSource addTarget:disFilter];
    // render
    [stillImageSource processImage];
    // read the rendered result
    return [disFilter imageFromCurrentFramebuffer];
}
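
For completeness, here is a minimal usage sketch. The class name Tool, the file name photo.jpg and the imageView variable are my assumptions for illustration, based on the code above rather than anything fixed by the original project:

// Minimal usage sketch (Tool, photo.jpg and imageView are assumed names)
UIImage *original = [UIImage imageNamed:@"photo.jpg"];
dispatch_async(dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0), ^{
    // the pixel loops are CPU-bound, so keep them off the main thread
    UIImage *cleaned = [Tool removePurpleSide:original];
    dispatch_async(dispatch_get_main_queue(), ^{
        imageView.image = cleaned;
    });
});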

This approach works reasonably well; it just takes a little extra time. If you know of a better way, please tell me~
