zoukankan      html  css  js  c++  java
  • 检测照度/噪声水平

      1 from __future__ import division
      2 import os, time, scipy.io
      3 import tensorflow as tf
      4 import numpy as np
      5 import rawpy
      6 import glob
      7 from model_sid_latest import network_my_unet
      8 import platform
      9 from PIL import Image
     10 
     11 # BBF100-2
     12 bbf_w = 4032 // 2
     13 bbf_h = 3024 // 2
     14 
     15 input_dir = 'D:/data/Sony/dataset/illu_detect/'
     16 
     17 
     18 def preprocess(raw, bl, wl):
     19     im = raw.raw_image_visible.astype(np.float32)
     20     im = np.maximum(im - bl, 0)
     21     return im / (wl - bl)
     22 
     23 
     24 def pack_raw_bbf(path):
     25     raw = rawpy.imread(path)
     26     bl = 64
     27     wl = 1023
     28     im = preprocess(raw, bl, wl)
     29     im = np.expand_dims(im, axis=2)
     30     H = im.shape[0]
     31     W = im.shape[1]
     32     if raw.raw_pattern[0, 0] == 0: # CFA=RGGB
     33         out = np.concatenate((im[0:H:2, 0:W:2, :],
     34                               im[0:H:2, 1:W:2, :],
     35                               im[1:H:2, 1:W:2, :],
     36                               im[1:H:2, 0:W:2, :]), axis=2)
     37     elif raw.raw_pattern[0,0] == 2: # BGGR
     38         out = np.concatenate((im[1:H:2, 1:W:2, :],
     39                               im[0:H:2, 1:W:2, :],
     40                               im[0:H:2, 0:W:2, :],
     41                               im[1:H:2, 0:W:2, :]), axis=2)
     42     elif raw.raw_pattern[0,0] == 1 and raw.raw_pattern[0,1] == 0: # GRBG
     43         out = np.concatenate((im[0:H:2, 1:W:2, :],
     44                               im[0:H:2, 0:W:2, :],
     45                               im[1:H:2, 0:W:2, :],
     46                               im[1:H:2, 1:W:2, :]), axis=2)
     47     elif raw.raw_pattern[0,0] == 1 and raw.raw_pattern[0,1] == 2: # GBRG
     48         out = np.concatenate((im[1:H:2, 0:W:2, :],
     49                               im[0:H:2, 0:W:2, :],
     50                               im[0:H:2, 1:W:2, :],
     51                               im[1:H:2, 1:W:2, :]), axis=2)
     52     else:
     53         assert False
     54     # wb = np.array(raw.camera_whitebalance)
     55     # wb[3] = wb[1]
     56     # wb = wb / wb[1]
     57     # out = np.minimum(out * wb, 1.0)
     58     return out
     59 
     60 
# ---------------------------------------------------------------------------
# TF1 graph: estimate scene noise / illuminance from sparse local statistics.
# radius x radius patches are sampled every `strides` pixels on one green
# channel; per-patch mean (miu) and a normalized mean absolute deviation
# (sigma) are reduced to two scalar summaries.
# ---------------------------------------------------------------------------
# One packed (R, G, B, G) half-resolution frame from pack_raw_bbf().
in_im = tf.placeholder(tf.float32, [1, bbf_h, bbf_w, 4], name='input')

# Obtain the noise level map
radius = 3    # neighbourhood patch side length per sample point
strides = 32  # grid step between sampled patches (sparse sampling)

# Red-channel variant, kept for reference (disabled):
# in_patches_r = tf.extract_image_patches(
#     tf.expand_dims(
#         in_im[:, :, :, 0], axis=3),
#     (1, radius, radius, 1),
#     (1, strides, strides, 1),
#     (1, 1, 1, 1),
#     'VALID')
# miu_r = tf.maximum(tf.reduce_mean(in_patches_r, axis=-1, keepdims=True), 1e-7)
# sigma_r = tf.reduce_sum(tf.abs((in_patches_r - miu_r) / miu_r), axis=-1, keepdims=True)/(radius * radius - 1)

# Second green channel (index 3 of the packed R,G,B,G stack); each output
# position holds the radius*radius pixels of one sampled neighbourhood.
in_patches_g = tf.extract_image_patches(
    tf.expand_dims(
        in_im[:, :, :, 3], axis=3),
    (1, radius, radius, 1),
    (1, strides, strides, 1),
    (1, 1, 1, 1),
    'VALID')
# Per-patch mean brightness, floored at 1e-7 to avoid division by zero.
miu_g = tf.maximum(tf.reduce_mean(in_patches_g, axis=-1, keepdims=True), 1e-7)
# Mean absolute deviation of each patch, normalized by the PATCH mean.
# NOTE(review): per the author's own follow-up note, normalizing by the
# local mean misbehaves when a bright light source is in frame; a revised
# version normalizes by the global mean brightness instead.
sigma_g = tf.reduce_sum(tf.abs((in_patches_g - miu_g) / miu_g), axis=-1, keepdims=True)/(radius * radius - 1)

# Blue-channel variant, kept for reference (disabled):
# in_patches_b = tf.extract_image_patches(
#     tf.expand_dims(
#         in_im[:, :, :, 2], axis=3),
#     (1, radius, radius, 1),
#     (1, strides, strides, 1),
#     (1, 1, 1, 1),
#     'VALID')
# miu_b = tf.maximum(tf.reduce_mean(in_patches_b, axis=-1, keepdims=True), 1e-7)
# sigma_b = tf.reduce_sum(tf.abs((in_patches_b - miu_b) / miu_b), axis=-1, keepdims=True)/(radius * radius - 1)
#
# sigma_level = tf.reduce_mean(tf.minimum(tf.minimum(sigma_r, sigma_g), sigma_b))

# Scalar summaries over all sampled patches.
sigma_level = tf.reduce_mean(sigma_g)  # overall noise level
illu_level = tf.reduce_mean(miu_g)     # overall green-channel brightness
print(in_patches_g.shape)

sess = tf.Session()
# Earlier day/night experiment, kept for reference (disabled):
# print('============= DAY ============')
# files = glob.glob(input_dir + '/day/*.dng')
# files.sort()
# for i in range(len(files)):
#     noise_level = sess.run(sigma_level, feed_dict={in_im: np.expand_dims(pack_raw_bbf(files[i]), axis=0)})
#     illu = 0.3 / noise_level
#     print(illu)
#
# print('============= NIGHT ============')
# files = glob.glob(input_dir + '/night/*.dng')
# files.sort()
# for i in range(len(files)):
#     noise_level = sess.run(sigma_level, feed_dict={in_im: np.expand_dims(pack_raw_bbf(files[i]), axis=0)})
#     illu = 0.3 / noise_level
#     print(illu)

print('============= NIGHT ============')
files = glob.glob('C:/Users/Administrator/Desktop/ILLU/*.dng')
files.sort()
for i in range(len(files)):
    # Feed one packed frame; fetch the noise and brightness estimates.
    sigma_, illu_ = sess.run([sigma_level, illu_level], feed_dict={in_im: np.expand_dims(pack_raw_bbf(files[i]), axis=0)})
    # Empirical mapping from noise level to an illuminance-like score
    # (0.3 is a hand-tuned constant).
    illu = 0.3 / sigma_
    print(illu)
    print(illu_)
    print('--------------')

    通过检测每个采样像素邻域内像素值的归一化平均绝对偏差(代码中为 Σ|x−μ|/μ,使用绝对值而非平方,并非严格意义上的均方差)来确定画面的整体噪声水平。

    这里需要注意的是每个点的明暗程度不一致,因此需要对该偏差统计量进行亮度归一化。

    【上述程序有个BUG】

    在存在明显的光源时,会出现照度测量不正确的情形。为了解决这一问题,引入全局画面亮度作为参考,即:平均照度应该考量总体像素值波动幅度的同时对全局亮度的归一。

     1 from __future__ import division
     2 import os, time, scipy.io
     3 import tensorflow as tf
     4 import numpy as np
     5 import rawpy
     6 import glob
     7 from model_sid_latest import network_my_unet
     8 import platform
     9 from PIL import Image
    10 
    11 # BBF100-2
    12 bbf_w = 4032 // 2
    13 bbf_h = 3024 // 2
    14 
    15 input_dir = 'D:/data/Sony/dataset/illu_detect/'
    16 
    17 
    18 def preprocess(raw, bl, wl):
    19     im = raw.raw_image_visible.astype(np.float32)
    20     im = np.maximum(im - bl, 0)
    21     return im / (wl - bl)
    22 
    23 
def pack_raw_bbf(path):
    """Read a DNG file and pack its Bayer mosaic into an (H/2, W/2, 4) array.

    Output channel order is always (R, G, B, G); the branches below pick
    the four 2x2 sub-grid phases according to the CFA layout reported by
    raw.raw_pattern.
    """
    raw = rawpy.imread(path)
    bl = 64    # BBF100-2 black level
    wl = 1023  # 10-bit white level
    im = preprocess(raw, bl, wl)
    im = np.expand_dims(im, axis=2)
    H = im.shape[0]
    W = im.shape[1]
    if raw.raw_pattern[0, 0] == 0: # CFA=RGGB
        out = np.concatenate((im[0:H:2, 0:W:2, :],
                              im[0:H:2, 1:W:2, :],
                              im[1:H:2, 1:W:2, :],
                              im[1:H:2, 0:W:2, :]), axis=2)
    elif raw.raw_pattern[0,0] == 2: # BGGR
        out = np.concatenate((im[1:H:2, 1:W:2, :],
                              im[0:H:2, 1:W:2, :],
                              im[0:H:2, 0:W:2, :],
                              im[1:H:2, 0:W:2, :]), axis=2)
    elif raw.raw_pattern[0,0] == 1 and raw.raw_pattern[0,1] == 0: # GRBG
        out = np.concatenate((im[0:H:2, 1:W:2, :],
                              im[0:H:2, 0:W:2, :],
                              im[1:H:2, 0:W:2, :],
                              im[1:H:2, 1:W:2, :]), axis=2)
    elif raw.raw_pattern[0,0] == 1 and raw.raw_pattern[0,1] == 2: # GBRG
        out = np.concatenate((im[1:H:2, 0:W:2, :],
                              im[0:H:2, 0:W:2, :],
                              im[0:H:2, 1:W:2, :],
                              im[1:H:2, 1:W:2, :]), axis=2)
    else:
        # Unknown CFA layout. NOTE(review): `assert` disappears under
        # `python -O`; an explicit exception would be safer here.
        assert False
    # White-balance experiment, kept for reference (disabled):
    # wb = np.array(raw.camera_whitebalance)
    # wb[3] = wb[1]
    # wb = wb / wb[1]
    # out = np.minimum(out * wb, 1.0)
    return out
    59 
    60 
# ---------------------------------------------------------------------------
# Revised TF1 graph. Fix for the bright-light-source problem described in
# the surrounding text: the patch deviation is now normalized by the GLOBAL
# mean brightness (illu_level) rather than each patch's own mean, so
# isolated highlights no longer distort the illuminance estimate.
# ---------------------------------------------------------------------------
# One packed (R, G, B, G) half-resolution frame from pack_raw_bbf().
in_im = tf.placeholder(tf.float32, [1, bbf_h, bbf_w, 4], name='input')

# Obtain the noise level map
radius = 3    # neighbourhood patch side length per sample point
strides = 32  # grid step between sampled patches (sparse sampling)

# Sample radius x radius patches from the second green channel (index 3).
in_patches_g = tf.extract_image_patches(
    tf.expand_dims(
        in_im[:, :, :, 3], axis=3),
    (1, radius, radius, 1),
    (1, strides, strides, 1),
    (1, 1, 1, 1),
    'VALID')
# Per-patch mean brightness, floored at 1e-7 to avoid division by zero.
miu_g = tf.maximum(tf.reduce_mean(in_patches_g, axis=-1, keepdims=True), 1e-7)
# Global brightness: mean over all patch means.
illu_level = tf.reduce_mean(miu_g)
# Old per-patch normalization, kept for comparison (disabled):
# sigma_g = tf.reduce_sum(tf.abs((in_patches_g - miu_g) / miu_g), axis=-1, keepdims=True)/(radius * radius - 1)
sigma_g = tf.reduce_sum(tf.abs((in_patches_g - miu_g) / illu_level), axis=-1, keepdims=True)/(radius * radius - 1)

sigma_level = tf.reduce_mean(sigma_g)  # overall noise level
print(in_patches_g.shape)

sess = tf.Session()

files = glob.glob('C:/Users/Administrator/Desktop/ILLU/*.dng')

files.sort()
for i in range(len(files)):
    # Feed one packed frame; fetch the noise and brightness estimates.
    sigma_, illu_ = sess.run([sigma_level, illu_level], feed_dict={in_im: np.expand_dims(pack_raw_bbf(files[i]), axis=0)})
    # Empirical noise-to-illuminance mapping (hand-tuned constant 0.25).
    illu = 0.25 / sigma_
    print(illu)
  • 相关阅读:
    【转】高性能网络编程4--TCP连接的关闭
    Kubernetes 用了,延迟高了 10 倍,问题在哪?
    多路复用和多路分用
    网络七层模型与四层模型区别
    Go验证包出错 dial tcp 34.64.4.17:443: i/o timeout
    spring Bean配置的三种形式
    Spring容器IOC初始化过程
    Go 特殊语法
    服务发现的基本原理与比较:Eureka vs Consul vs Zookeeper
    docker 常用命令
  • 原文地址:https://www.cnblogs.com/thisisajoke/p/10412046.html
Copyright © 2011-2022 走看看