I use deep-learning models to detect elements in an image. Once these elements are detected, I try to recover two colours from the image.
Here is an example of the images I process:
To make this easier, I boost the contrast of the image to bring out the colours; here is an example:
My goal is to find the blue and red colours in this image, and this is exactly where I get stuck. When the image is of good quality I manage to find the colours, but on lower-quality images it is very hard to get good results.
The colours I would like to find are the following: red, green, blue, yellow, grey, brown, violet, turquoise, orange, pink.
Do you know of any image-processing methods or machine-learning models that could solve my problem?
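One direction I have started to look at (not yet reliable on my low-quality images) is matching the dominant colour of each detected region to the nearest entry of this palette in CIELAB space with deltaE, since Lab distances are closer to perceived colour differences than RGB distances. A rough sketch, where the palette RGB reference values are hypothetical and would need tuning:

import numpy as np
from skimage.color import rgb2lab, deltaE_cie76

# Hypothetical RGB reference values for the target palette (to be tuned)
PALETTE = {
    "red": (200, 30, 30), "green": (40, 160, 60), "blue": (40, 70, 200),
    "yellow": (230, 220, 50), "grey": (128, 128, 128), "brown": (130, 80, 40),
    "violet": (130, 60, 180), "turquoise": (60, 200, 200),
    "orange": (240, 140, 30), "pink": (240, 140, 180),
}

def closest_palette_colour(region_rgb):
    # Mean colour of the region, scaled to [0, 1] for skimage
    mean_rgb = region_rgb.reshape(-1, 3).mean(axis=0) / 255.0
    mean_lab = rgb2lab(mean_rgb.reshape(1, 1, 3))
    best_name, best_dist = None, float("inf")
    for name, rgb in PALETTE.items():
        ref_lab = rgb2lab(np.array(rgb, dtype=float).reshape(1, 1, 3) / 255.0)
        dist = deltaE_cie76(mean_lab, ref_lab)[0, 0]
        if dist < best_dist:
            best_name, best_dist = name, dist
    return best_name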
More example images:
And the code I used:
import cv2
import copy
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter
from skimage.color import rgb2lab, deltaE_cie76
import os
from PIL import Image, ImageEnhance
class ImageColorDetection(object):

    origineFrame: list = []
    imageFrame: list = []
    hsvFrame: list = []
    colorList: dict = {}

    def __init__(self, array=None, path=None, rotated=0):
        self.colorList = {}

        if path is not None:
            self.origineFrame = Image.open(path).convert('RGB').rotate(rotated)
            im_output = Image.open(path).convert('RGB').rotate(rotated)
        elif array is not None:
            self.origineFrame = Image.fromarray(array).convert('RGB').rotate(rotated)
            im_output = Image.fromarray(array).convert('RGB').rotate(rotated)
        else:
            raise Exception('No image was provided to the constructor')

        #im_output = im_output.filter(ImageFilter.BLUR)
        #im_output = im_output.filter(ImageFilter.EDGE_ENHANCE_MORE)
        #im_output = ImageOps.autocontrast(im_output, cutoff = 5, ignore = 5)

        # Boost saturation, sharpness and brightness so the colours are easier to threshold
        enhancer = ImageEnhance.Color(im_output)
        im_output = enhancer.enhance(3)
        enhancer = ImageEnhance.Contrast(im_output)
        im_output = enhancer.enhance(0.9)
        enhancer = ImageEnhance.Sharpness(im_output)
        im_output = enhancer.enhance(2)
        enhancer = ImageEnhance.Brightness(im_output)
        im_output = enhancer.enhance(1.6)

        # Keep a BGR copy for OpenCV and an HSV copy for colour thresholding
        im_output = np.array(im_output)
        self.imageFrame = cv2.cvtColor(im_output, cv2.COLOR_RGB2BGR)
        self.hsvFrame = cv2.cvtColor(self.imageFrame, cv2.COLOR_BGR2HSV)
    def findColor(self, color_rgb, color_title, color_upper, color_lower):
        kernel = np.ones((5, 5), "uint8")
        color_mask = cv2.inRange(self.hsvFrame, color_lower, color_upper)
        color_mask = cv2.dilate(color_mask, kernel)
        res_color = cv2.bitwise_and(self.imageFrame, self.imageFrame,
                                    mask=color_mask)

        current_area = 0
        x, y, w, h = 0, 0, 0, 0

        # Find the contours of the masked regions and keep the largest one.
        # Note: cv2.findContours returns two values in OpenCV 4.x
        # (it returned three in OpenCV 3.x).
        contours, hierarchy = cv2.findContours(color_mask,
                                               cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
        for pic, contour in enumerate(contours):
            area = cv2.contourArea(contour)
            if area > 1000 and current_area < area:
                x, y, w, h = cv2.boundingRect(contour)
                self.colorList[color_title] = x, y, w, h, color_rgb
                current_area = area

        return color_title in self.colorList.keys()
    def ShowImage(self):
        # Draw the detected boxes and colour names on the original image.
        # np.array (rather than np.asarray) makes a writable copy of the PIL image,
        # which cv2.rectangle/cv2.putText can draw on.
        tmp_img = np.array(self.origineFrame)
        for color in self.colorList:
            cv2.rectangle(
                tmp_img,
                (self.colorList[color][0], self.colorList[color][1]),
                ((self.colorList[color][0] + self.colorList[color][2]),
                 (self.colorList[color][1] + self.colorList[color][3])),
                self.colorList[color][4], 2)
            cv2.putText(
                tmp_img,
                color,
                (self.colorList[color][0], self.colorList[color][1]),
                cv2.FONT_HERSHEY_SIMPLEX,
                1.0,
                self.colorList[color][4])
        #plt.imshow(tmp_img)
        return tmp_img
    def ShowImageContrast(self):
        # Same as ShowImage, but draws on the contrast-enhanced frame
        tmp_img = copy.copy(self.imageFrame)
        tmp_img = cv2.cvtColor(tmp_img, cv2.COLOR_BGR2RGB)
        for color in self.colorList:
            cv2.rectangle(
                tmp_img,
                (self.colorList[color][0], self.colorList[color][1]),
                ((self.colorList[color][0] + self.colorList[color][2]),
                 (self.colorList[color][1] + self.colorList[color][3])),
                self.colorList[color][4], 3)
            cv2.putText(
                tmp_img,
                color,
                (self.colorList[color][0], self.colorList[color][1]),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.8,
                self.colorList[color][4])
        #plt.imshow(tmp_img)
        return tmp_img
    def RGB2HEX(self, color):
        return "#{:02x}{:02x}{:02x}".format(int(color[0]), int(color[1]), int(color[2]))
    def get_colors(self, contrasted, number_of_colors, show_chart):
        if contrasted:
            # imageFrame is stored as BGR, so convert back to RGB
            # before computing hex codes
            modified_image = cv2.cvtColor(self.imageFrame, cv2.COLOR_BGR2RGB)
        else:
            modified_image = np.asarray(self.origineFrame)
        modified_image = cv2.resize(modified_image, (600, 400), interpolation=cv2.INTER_AREA)
        modified_image = modified_image.reshape(modified_image.shape[0] * modified_image.shape[1], 3)

        # Cluster the pixels and count how many fall into each cluster
        clf = KMeans(n_clusters=number_of_colors)
        labels = clf.fit_predict(modified_image)
        counts = Counter(labels)
        # Sort by label so counts and cluster centres stay aligned
        counts = dict(sorted(counts.items()))

        center_colors = clf.cluster_centers_
        ordered_colors = [center_colors[i] for i in counts.keys()]
        hex_colors = [self.RGB2HEX(color) for color in ordered_colors]
        rgb_colors = ordered_colors

        print("Number of colours:", len(hex_colors))

        if show_chart:
            plt.figure(figsize=(8, 6))
            plt.pie(counts.values(), labels=hex_colors, colors=hex_colors)

        return counts, hex_colors, rgb_colors
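For reference, a minimal sketch of how the class can be called; the image path and the HSV bounds below are hypothetical placeholders, just for illustration:

# Hypothetical usage of the class above; the HSV bounds would need tuning
detector = ImageColorDetection(path="example.jpg")

blue_lower = np.array([94, 80, 50], np.uint8)
blue_upper = np.array([126, 255, 255], np.uint8)
found = detector.findColor((0, 0, 255), "blue", blue_upper, blue_lower)

if found:
    plt.imshow(detector.ShowImage())
    plt.show()

counts, hex_colors, rgb_colors = detector.get_colors(contrasted=True,
                                                     number_of_colors=8,
                                                     show_chart=True)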
Attempt with HSV:
With HSV my problem persists. I found a tutorial that sets up the problem I am trying to solve, but it does not work on the images I am trying to process. Do you have any resources on the subject (courses, YouTube videos, articles)?
Here is the article in question: https://towardsdatascience.com/color-identification-in-images-machine-learning-application-b26e770c4c71
The advice I received so far: convert your image to HSV with cv2.cvtColor(image, cv2.COLOR_BGR2HSV), then create threshold vectors. Using these thresholds you can filter out the different colours; read more about HSV.
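A minimal sketch of that HSV-thresholding idea, with hypothetical lower/upper bounds for blue and red (the exact ranges would still have to be tuned on my images):

import cv2
import numpy as np

image = cv2.imread("example.jpg")            # hypothetical input image
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

# Hypothetical HSV bounds; OpenCV uses H in [0, 179], S and V in [0, 255]
blue_lower = np.array([94, 80, 50], np.uint8)
blue_upper = np.array([126, 255, 255], np.uint8)
blue_mask = cv2.inRange(hsv, blue_lower, blue_upper)
blue_only = cv2.bitwise_and(image, image, mask=blue_mask)  # keep only the "blue" pixels

# Red hue wraps around H = 0, so it needs two ranges combined
red_mask = cv2.inRange(hsv, np.array([0, 100, 50]), np.array([10, 255, 255])) | \
           cv2.inRange(hsv, np.array([170, 100, 50]), np.array([179, 255, 255]))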