# main.py
from collections import Counter
import concurrent.futures
from concurrent.futures import ProcessPoolExecutor
import os
import random
import threading
import time
import traceback
import zipfile

import gradio as gr
import numpy as np
from numpy import std
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, features
from scipy.spatial import cKDTree
from scipy.spatial.distance import cdist  # For calculating distances between color sets
from skimage.color import rgb2lab, lab2rgb  # For color space conversion
from skimage.metrics import structural_similarity as ssim
from sklearn.cluster import KMeans
# Constants for dithering and quantization methods
DITHER_METHODS = {
"None": Image.Dither.NONE,
"Floyd-Steinberg": Image.Dither.FLOYDSTEINBERG
}
QUANTIZATION_METHODS = {
"Median cut": Image.Quantize.MEDIANCUT,
"Maximum coverage": Image.Quantize.MAXCOVERAGE,
"Fast octree": Image.Quantize.FASTOCTREE,
"libimagequant": Image.Quantize.LIBIMAGEQUANT
}
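# Illustrative sketch (hypothetical image `img`) of how the two lookup
# tables above feed Pillow's quantizer:
#   quantize = QUANTIZATION_METHODS["Median cut"]
#   dither = DITHER_METHODS["Floyd-Steinberg"]
#   reduced = img.quantize(colors=4, method=quantize, dither=dither)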
def tile_variance(tile):
"""Compute the variance of a tile."""
arr = np.array(tile)
return np.std(arr, axis=(0, 1)).mean() # Compute the mean std deviation across color channels
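# Example (illustrative): a solid-color tile has zero variance, so sorting
# by this value pushes flat background tiles to one end of the list:
#   >>> tile_variance(Image.new('RGB', (8, 8), (10, 20, 30)))
#   0.0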
def tile_similarity(tile1, tile2):
"""Calculate the Hamming similarity between two tiles."""
# Convert tiles to numpy arrays if they aren't already
arr1 = np.array(tile1)
arr2 = np.array(tile2)
# Flatten arrays to compare them pixel-by-pixel
flat1 = arr1.flatten()
flat2 = arr2.flatten()
# Calculate Hamming distance
hamming_distance = np.sum(flat1 != flat2)
# Normalize the Hamming distance to get a similarity measure
similarity = 1 - (hamming_distance / flat1.size)
return similarity
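# Example (illustrative): identical tiles score 1.0; every differing channel
# value lowers the score in proportion to the tile size:
#   >>> solid = Image.new('RGB', (8, 8), (0, 0, 0))
#   >>> tile_similarity(solid, solid)
#   1.0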
def dominant_color(tile, color_palette):
# Convert the tile to a NumPy array
arr = np.array(tile)
# Check the shape of the array to determine if it's grayscale or color
if len(arr.shape) == 2:
# Grayscale image, so reshape it to (-1, 1) instead of (-1, 3)
arr = arr.reshape(-1, 1)
elif len(arr.shape) == 3:
# Color image, so ensure it's reshaped correctly for RGB
arr = arr.reshape(-1, 3)
else:
# Unexpected image format
raise ValueError("Unexpected image format!")
# For grayscale images, the dominant 'color' will just be the most common value
if arr.shape[1] == 1:
unique, counts = np.unique(arr, return_counts=True)
dominant = unique[np.argmax(counts)]
return (dominant,) * 3 # Return as a tuple to keep consistent format with RGB
else:
# Find the most frequent color in the case of an RGB image
unique, counts = np.unique(arr, axis=0, return_counts=True)
dominant_index = np.argmax(counts)
return tuple(unique[dominant_index]) # Convert to tuple to match expected format
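# Example (illustrative): for a single-color tile the most frequent pixel
# value wins, independent of the (currently unused) color_palette argument:
#   dominant_color(Image.new('RGB', (8, 8), (200, 0, 0)), None)  # -> (200, 0, 0)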
def apply_gothic_filter(image, threshold, dot_size, spacing, contrast_boost=1.5, edge_enhance=True, noise_factor=0.1,
apply_blur=True, irregular_shape=True, irregular_size=True):
original_mode = image.mode
if original_mode == 'P':
image = image.convert('RGB')
# Increase contrast
enhancer = ImageEnhance.Contrast(image)
image = enhancer.enhance(contrast_boost)
# Edge enhancement
if edge_enhance:
image = image.filter(ImageFilter.EDGE_ENHANCE_MORE)
img_array = np.array(image)
# Determine background color
unique_colors, color_counts = np.unique(img_array.reshape(-1, 3), axis=0, return_counts=True)
tree = cKDTree(unique_colors)
# Choose background color: darkest color among the top 3 most common colors
top_colors = unique_colors[np.argsort(color_counts)[-3:]]
background_color = tuple(top_colors[np.argmin(np.sum(top_colors, axis=1))])
result = Image.new('RGB', image.size, color=background_color)
draw = ImageDraw.Draw(result)
# Create a distressed texture if irregular size is enabled
if irregular_size:
texture = Image.new('L', image.size)
texture_draw = ImageDraw.Draw(texture)
for _ in range(int(image.width * image.height * 0.1)): # Adjust density as needed
x = random.randint(0, image.width - 1)
y = random.randint(0, image.height - 1)
texture_draw.point((x, y), fill=random.randint(0, 255))
for y in range(0, image.height, spacing):
for x in range(0, image.width, spacing):
original_color = img_array[y, x]
luminance = 0.299 * original_color[0] + 0.587 * original_color[1] + 0.114 * original_color[2]
if luminance > threshold:
_, index = tree.query(original_color)
nearest_color = tuple(unique_colors[index])
# Determine dot size
if irregular_size:
texture_value = texture.getpixel((x, y))
adjusted_dot_size = max(1, int(dot_size * (texture_value / 255)))
else:
adjusted_dot_size = dot_size
# Add slight randomness to dot position
x_offset = int(random.uniform(-spacing / 2, spacing / 2) * noise_factor)
y_offset = int(random.uniform(-spacing / 2, spacing / 2) * noise_factor)
if irregular_shape:
# Draw an irregular shape
points = []
for i in range(8): # 8-sided irregular shape
angle = i * (2 * np.pi / 8) + random.uniform(0, np.pi / 4)
r = adjusted_dot_size * (1 + random.uniform(-0.2, 0.2)) # Vary the radius
px = x + x_offset + int(r * np.cos(angle))
py = y + y_offset + int(r * np.sin(angle))
points.append((px, py))
draw.polygon(points, fill=nearest_color)
else:
# Draw a regular circle
draw.ellipse([(x + x_offset - adjusted_dot_size, y + y_offset - adjusted_dot_size),
(x + x_offset + adjusted_dot_size, y + y_offset + adjusted_dot_size)],
fill=nearest_color)
# Apply a slight blur to soften the effect
if apply_blur:
result = result.filter(ImageFilter.GaussianBlur(radius=0.5))
if original_mode == 'P':
        # Count the colors in the processed image (getcolors() returns None
        # when there are more than 256 distinct colors)
        original_colors = image.getcolors()
        if original_colors is None:
            original_num_colors = 256
        else:
            original_num_colors = len(original_colors)
        # Quantize the result back down to that many colors
        result = result.quantize(colors=original_num_colors, method=Image.Quantize.MEDIANCUT)
return result
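# Usage sketch (hypothetical parameter values) for the filter above:
#   gothic = apply_gothic_filter(img, threshold=60, dot_size=2, spacing=4,
#                                contrast_boost=1.5, edge_enhance=True,
#                                noise_factor=0.1)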
def most_common_surrounding_color(image, x, y, tile_size, default_color):
    """Calculate the most common color immediately bordering a specific tile."""
    border_colors = []
    # (row, col_start, col_end) for the rows just above and below the tile
    top_range = (max(0, y - 1), x, min(image.width, x + tile_size))
    bottom_range = (min(image.height - 1, y + tile_size), x, min(image.width, x + tile_size))
    # (col, row_start, row_end) for the columns just left and right of the tile
    left_range = (max(0, x - 1), y, min(image.height, y + tile_size))
    right_range = (min(image.width - 1, x + tile_size), y, min(image.height, y + tile_size))
    # Sample colors from the rows above and below
    for y_pos, x_start, x_end in [top_range, bottom_range]:
        for adj_x in range(x_start, x_end):
            try:
                color = image.getpixel((adj_x, y_pos))
            except IndexError:
                color = (0, 0, 0)
            if color != (0, 0, 0):  # Exclude black if necessary
                border_colors.append(color)
            else:
                border_colors.append(default_color)
    # Sample colors from the columns to the left and right
    for x_pos, y_start, y_end in [left_range, right_range]:
        for adj_y in range(y_start, y_end):
            try:
                color = image.getpixel((x_pos, adj_y))
            except IndexError:
                color = (0, 0, 0)
            if color != (0, 0, 0):  # Exclude black if necessary
                border_colors.append(color)
    # Find the most common border color
    if border_colors:
        return max(set(border_colors), key=border_colors.count)
    # Fall back to the default color if no valid bordering colors were found
    return default_color
def get_most_common_color(tile):
    colors = tile.getcolors()
    # getcolors() returns (count, value) pairs; take the value with the highest count
    most_common_color = max(colors, key=lambda c: c[0])[1]
    if tile.mode == 'P':
        # The value is a palette index; look up its RGB triplet in the palette
        palette = tile.getpalette()
        most_common_color = tuple(palette[most_common_color * 3:most_common_color * 3 + 3])
    return most_common_color
def get_adjacent_common_color(main_image, x, y, default_color):
# Collect colors from adjacent pixels
adjacent_colors = []
for dy in [-1, 0, 1]:
for dx in [-1, 0, 1]:
# Skip the center pixel itself
if dx == 0 and dy == 0:
continue
nx, ny = x + dx, y + dy
if 0 <= nx < main_image.width and 0 <= ny < main_image.height:
adjacent_color = main_image.getpixel((nx, ny))
if adjacent_color != (0, 0, 0):
adjacent_colors.append(adjacent_color)
    # Find the most common adjacent color (black is excluded above as an
    # 'empty / yet to fill' marker)
    if adjacent_colors:
        most_common = max(set(adjacent_colors), key=adjacent_colors.count)
        # Look the index up in the palette if the image is palettized
        if main_image.mode == 'P':
            palette = main_image.getpalette()
            most_common = tuple(palette[most_common * 3:most_common * 3 + 3])
return most_common
else:
return default_color
def adjust_tile_colors(tile, surrounding_colors):
"""Adjust colors of the tile based on surrounding colors."""
if not surrounding_colors:
return tile # No surrounding colors, no adjustment needed
# Count occurrences of each color in surrounding tiles
color_counter = Counter(surrounding_colors)
# Find the most common color
most_common_color = color_counter.most_common(1)[0][0]
# Replace all colors in the tile with the most common color
adjusted_tile = np.full_like(tile, most_common_color)
return adjusted_tile
def most_common_border_color(image, x, y, tile_size, default_color):
"""Calculate the most common color in the bordering pixels of a specific tile."""
border_colors = []
# Define pixel coordinates for the bordering line
border_positions = [(x + i, y) for i in range(-1, tile_size + 1)] + [(x + i, y + tile_size - 1) for i in
range(-1, tile_size + 1)] \
+ [(x, y + i) for i in range(tile_size)] + [(x + tile_size - 1, y + i) for i in range(tile_size)]
# Sample colors from each border pixel, ensuring they are within image bounds
for bx, by in border_positions:
if 0 <= bx < image.width and 0 <= by < image.height:
color = image.getpixel((bx, by))
if color != (0, 0, 0): # Skip black or adjust as needed
border_colors.append(color)
# Find the most common border color
if border_colors:
return max(set(border_colors), key=border_colors.count)
else:
return default_color
def tile_similarity_indexed(tile1, tile2):
    # Compare two tiles pixel-by-pixel on their raw palette indices.
    # (Superseded by the frequency-normalized version defined later in this file.)
    data1 = tile1.load()
    data2 = tile2.load()
    similar_pixels = 0
    total_pixels = tile1.size[0] * tile1.size[1]
    for y in range(tile1.size[1]):
        for x in range(tile1.size[0]):
            if data1[x, y] == data2[x, y]:  # Compare index values instead of colors
                similar_pixels += 1
    return similar_pixels / total_pixels
def convert_to_rgb_with_four_colors(image: Image, target_colors: list):
    # The image must already be in 'P' mode
    if image.mode != 'P':
        raise ValueError("Image must be in 'P' mode")
    # Create a new RGB image with the same dimensions as the original
    new_rgb_image = Image.new('RGB', image.size)
    # Map each of the 256 possible palette indexes onto one of the target colors
    color_mapping = {index: target_colors[index % len(target_colors)] for index in range(256)}
    for y in range(image.size[1]):  # Iterate over height
        for x in range(image.size[0]):  # Iterate over width
            # Get the palette index of the current pixel
            index = image.getpixel((x, y))
            # Set the new image's pixel to the mapped target color
            new_rgb_image.putpixel((x, y), color_mapping[index])
    return new_rgb_image
def calculate_ssim(tile1, tile2):
"""
Calculate the Structural Similarity Index (SSIM) between two tiles.
"""
# Convert tiles to grayscale for SSIM calculation
tile1_gray = np.array(tile1.convert('L'))
tile2_gray = np.array(tile2.convert('L'))
# Calculate SSIM. Ensure data_range matches the max of the data type.
score, _ = ssim(tile1_gray, tile2_gray, full=True, data_range=255)
return score
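# Example (illustrative): SSIM is 1.0 for identical tiles and drops towards
# zero (or below) as their structure diverges:
#   >>> calculate_ssim(tile_a, tile_a)  # tile_a: any PIL image
#   1.0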
def normalize_tile(tile):
"""
Convert a tile to a normalized form based on the frequency of each color,
disregarding the specific colors themselves. The tile should already be in 'P' mode.
"""
# In P mode, the data is already flat: a sequence of indices, not rows of pixels
flat_tile = list(tile.getdata())
# Count frequency of each color (index) in the tile
color_counts = Counter(flat_tile)
# Sort colors by frequency (and then by index value to ensure consistency)
sorted_colors = sorted(color_counts.keys(), key=lambda color: (-color_counts[color], color))
# Map each color to its rank (most common = 0, next = 1, etc.)
color_map = {color: rank for rank, color in enumerate(sorted_colors)}
# Create a new tile with normalized color values
normalized_pixels = [color_map[color] for color in flat_tile]
# Convert back to an Image
new_tile = Image.new('P', tile.size)
new_tile.putdata(normalized_pixels)
return new_tile
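# Example (illustrative): normalization keys on frequency rank, not on the
# actual indices, so the index sequences [5, 5, 9, 5] and [2, 2, 7, 2]
# both normalize to [0, 0, 1, 0].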
def tile_similarity_indexed(tile1, tile2):
"""
Compare two tiles based on their normalized forms.
"""
# Convert both tiles to their normalized forms
norm_tile1 = normalize_tile(tile1)
norm_tile2 = normalize_tile(tile2)
# Now compare the normalized tiles directly
data1 = norm_tile1.getdata()
data2 = norm_tile2.getdata()
# Measure similarity as the percentage of matching pixels
matches = sum(1 for p1, p2 in zip(data1, data2) if p1 == p2)
return matches / len(data1)
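# Example (illustrative): because both tiles are normalized first, a tile
# and a palette-swapped copy of it (same pattern, different indices)
# compare as identical:
#   tile_similarity_indexed(tile, palette_swapped_tile)  # both 'P' mode -> 1.0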
def map_pattern_to_palette(source_tile, target_tile):
"""
Maps the color pattern from the source tile to the target tile's palette.
:param source_tile: The source tile (Image object) whose pattern will be used.
:param target_tile: The target tile (Image object) whose palette will be applied.
:return: A new tile with the source pattern and target palette.
"""
# Get the normalized form of the source tile to understand its pattern
norm_source_tile = normalize_tile(source_tile)
norm_source_data = list(norm_source_tile.getdata())
# Get the data from the target tile (these are palette indexes)
target_data = list(target_tile.getdata())
# Create a mapping from the normalized source pattern to the target's indexes
pattern_to_color = {}
for norm_val, target_val in zip(norm_source_data, target_data):
if norm_val not in pattern_to_color: # if this pattern not yet mapped, map to current color in target
pattern_to_color[norm_val] = target_val
# Apply this mapping to create a new tile based on the source pattern but using target's color indexes
new_tile_data = [pattern_to_color[norm_val] for norm_val in norm_source_data]
# Create a new image for the mapped tile
new_tile = Image.new('P', target_tile.size)
new_tile.putpalette(target_tile.getpalette())
new_tile.putdata(new_tile_data)
return new_tile
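# Usage sketch (hypothetical tiles): keep one tile's pattern while reusing
# another tile's palette indexes:
#   merged = map_pattern_to_palette(pattern_tile, colour_tile)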
def reduce_tiles_index(image: Image, tile_size=8, max_unique_tiles=192, similarity_threshold=0.7, use_tile_variance=False, custom_palette_colors=None):
"""
Reduces the number of unique tiles in an image based on similarity.
"""
width, height = image.size
width -= width % tile_size
height -= height % tile_size
    # Convert to 'P' mode, preserving the color information exactly
    image = image.crop((0, 0, width, height)).convert('P', colors=256, dither=Image.Dither.NONE)
# Initialize variables
tiles = [(x, y, image.crop((x, y, x + tile_size, y + tile_size)))
for y in range(0, height, tile_size)
for x in range(0, width, tile_size)]
# Sort tiles by variance if required
if use_tile_variance:
tiles.sort(key=lambda x: tile_variance(x[2]))
unique_tiles = []
tile_mapping = {}
new_image = Image.new('P', (width, height)) # Use 'P' mode for the new image
image_palette = image.getpalette() # Get the palette of the original image
new_image.putpalette(image_palette) # Apply the same palette to the new image
notice = ''
for x, y, tile in tiles:
best_similarity = -1
best_match = None
for unique_x, unique_y, unique_tile in unique_tiles:
            sim = tile_similarity_indexed(tile, unique_tile)
if sim > best_similarity:
best_similarity = sim
best_match = (unique_x, unique_y, unique_tile)
if best_similarity > similarity_threshold:
tile_mapping[(x, y)] = (best_match[0], best_match[1])
elif len(unique_tiles) < max_unique_tiles:
unique_tiles.append((x, y, tile))
tile_mapping[(x, y)] = (x, y)
        else:
            # Out of spare tiles: fall back to the closest existing tile
            best_match = max(unique_tiles, key=lambda ut: tile_similarity_indexed(tile, ut[2]))
            tile_mapping[(x, y)] = (best_match[0], best_match[1])
# Paint the new image
for (x, y), (ux, uy) in tile_mapping.items():
original_tile = image.crop((x, y, x + tile_size, y + tile_size)) # Get the original tile
pattern_tile = image.crop((ux, uy, ux + tile_size, uy + tile_size)) # Get the tile to copy the pattern from
new_tile = map_pattern_to_palette(pattern_tile, original_tile) # Create a new tile with the original palette but new pattern
new_image.paste(new_tile, (x, y))
if len(unique_tiles) < max_unique_tiles:
notice = f"Unique tiles used: {len(unique_tiles)}/{max_unique_tiles}\nConsider increasing Tile Similarity Threshold."
else:
remaining_tiles = len(tiles) - len(unique_tiles)
notice += f"Out of spare tiles. Consider reducing Tile Similarity Threshold.\nRemaining tiles: {remaining_tiles}"
return new_image, notice
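# Usage sketch (hypothetical values): cap an image at 192 unique 8x8 tiles,
# merging tiles whose indexed patterns match at least 80%:
#   reduced, warning = reduce_tiles_index(img, tile_size=8,
#                                         max_unique_tiles=192,
#                                         similarity_threshold=0.8)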
def reduce_tiles(image, tile_size=8, max_unique_tiles=192, similarity_threshold=0.7):
width, height = image.size
width -= width % tile_size
height -= height % tile_size
image = image.crop((0, 0, width, height))
notice = ''
image = image.convert('P')
# Assuming the image has only four colors
color_counts = image.getcolors(width * height) # Get all colors in the image
top_colors = [color for count, color in color_counts] # Extract the colors
# Gather and sort tiles with variance
tiles = [(x, y, image.crop((x, y, x + tile_size, y + tile_size)))
for y in range(0, height, tile_size)
for x in range(0, width, tile_size)]
if use_tile_variance.value:
tiles.sort(key=lambda x: tile_variance(x[2]))
unique_tiles = [] # Store unique tiles
tile_mapping = {} # Map from old tiles to new tiles (for merged tiles)
new_image = Image.new('RGB', (width, height)) # Prepare the new image
    # Add a solid block for each of the image's colors to unique_tiles
    palette = image.getpalette()
    for i, color_index in enumerate(top_colors):
        color_tile = Image.new('P', (tile_size, tile_size), color=color_index)
        color_tile.putpalette(palette)
        # Negative multiples of the tile size serve as sentinel coordinates,
        # so these synthetic tiles can never collide with a real tile position
        unique_tiles.append(((i + 1) * -tile_size, (i + 1) * -tile_size, color_tile))
for x, y, tile in tiles:
# Find the most similar tile in the unique set
similarity, (idx, (ux, uy, utile)) = max(
((tile_similarity(tile, utile), (i, unique_tiles[i])) for i, (ux, uy, utile) in enumerate(unique_tiles)),
key=lambda x: x[0]
)
if similarity > similarity_threshold:
# Merge similar tiles by referencing the similar tile in the mapping
tile_mapping[(x, y)] = (ux, uy)
continue
elif len(unique_tiles) < max_unique_tiles:
# Add the tile to the unique set if we have room
unique_tiles.append((x, y, tile))
tile_mapping[(x, y)] = (x, y)
if len(unique_tiles) == max_unique_tiles:
remaining_tiles = len(tiles) - (x // tile_size + y // tile_size * (width // tile_size))
notice = (f"**OUT OF SPARE TILES**\n"
f"Tiles left to process: {remaining_tiles}\n"
f"Consider reducing Tile Similarity Threshold")
else:
# Initialize a variable to store the best match index and its similarity score
best_match_index = -1
best_similarity = -1 # Start with -1 to ensure any real similarity will be higher
# Iterate through all unique tiles to find the most similar one
for i, (ux, uy, utile) in enumerate(unique_tiles):
# Calculate the similarity between the current tile and this unique tile
current_similarity = tile_similarity(tile, utile)
# Update the best match if this tile is more similar than previous ones
if current_similarity > best_similarity:
best_similarity = current_similarity
best_match_index = i
# After finding the most similar tile, retrieve its information
if best_match_index != -1: # Check that we found a match
ux, uy, utile = unique_tiles[best_match_index]
tile_mapping[(x, y)] = (ux, uy)
else:
# This else block should ideally never be hit since we always have unique tiles,
# but it's good practice to handle this case.
# Fallback: Use the first unique tile or handle this error appropriately.
ux, uy, utile = unique_tiles[0] # Default to the first unique tile
tile_mapping[(x, y)] = (ux, uy)
# Paint the new image
for (x, y), (ux, uy) in tile_mapping.items():
tile = image.crop((ux, uy, ux + tile_size, uy + tile_size))
new_image.paste(tile, (x, y)) # Directly pasting the tile without color adjustment
if not notice:
notice = (f"Unique tiles used : {len(unique_tiles)}/{max_unique_tiles}\n"
f"Consider increasing Tile Similarity Threshold.")
return new_image, notice if notice else None
def downscale_image(image: Image, new_width: int, new_height: int, keep_aspect_ratio: bool) -> Image:
if keep_aspect_ratio:
old_width, old_height = image.size
aspect_ratio = old_width / old_height
if new_width / new_height > aspect_ratio:
new_width = int(new_height * aspect_ratio)
else:
new_height = int(new_width / aspect_ratio)
return image.resize((new_width, new_height), Image.NEAREST)
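# Example (illustrative): fit an image into the 160x144 Game Boy screen
# while keeping its aspect ratio (one dimension may come out smaller):
#   small = downscale_image(img, 160, 144, keep_aspect_ratio=True)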
def limit_colors(image, limit=16, quantize=None, dither=None, palette_image=None):
if palette_image:
ppalette = palette_image.getcolors()
color_palette = palette_image.quantize(colors=len(list(set(ppalette))))
else:
color_palette = image.quantize(colors=limit, kmeans=limit if limit else 0, method=quantize,
dither=dither)
image = image.quantize(palette=color_palette, dither=dither)
return image
def create_palette_from_colors(color_list):
# Create an empty image with size (1, len(color_list))
palette_image = Image.new("RGB", (1, len(color_list)))
# Iterate over the colors and set each pixel in the corresponding row
for i, color in enumerate(color_list):
palette_image.putpixel((0, i), color)
# Convert the image to the palette mode
palette_image = palette_image.convert("P", palette=Image.ADAPTIVE)
return palette_image
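# Example (illustrative): build a 4-entry palette image from the classic
# DMG green shades and feed it to limit_colors above:
#   dmg = create_palette_from_colors([(15, 56, 15), (48, 98, 48),
#                                     (139, 172, 15), (155, 188, 15)])
#   limited = limit_colors(img, palette_image=dmg)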
def convert_to_grayscale(image):
return image.convert("L").convert("RGB")
def convert_to_black_and_white(image: Image, threshold: int = 128, is_inversed: bool = False):
    # Above the threshold -> white (or black when inversed); below -> the opposite
    apply_threshold = (lambda x: 0 if x > threshold else 255) if is_inversed \
        else (lambda x: 255 if x > threshold else 0)
    return image.convert('L').point(apply_threshold, mode='1').convert("RGB")
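# Example (illustrative): pixels brighter than the threshold become white
# (or black when is_inversed is set):
#   bw = convert_to_black_and_white(img, threshold=128, is_inversed=False)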
# Gradio UI and processing function
original_width = gr.State(value=0)
original_height = gr.State(value=0)
palette_color_1_string = gr.State(value="#000000")
palette_color_2_string = gr.State(value="#000000")
palette_color_3_string = gr.State(value="#000000")
palette_color_4_string = gr.State(value="#000000")
quantize_for_GBC = gr.State(False)
use_tile_variance = gr.State(False)
def capture_original_dimensions(image):
# Update global variables with the dimensions of the uploaded image
width, height = image.size
return width, height, image # Return original dimensions and the unchanged image for further processing
def adjust_for_aspect_ratio(keep_aspect, current_width, current_height):
if keep_aspect and original_width.value and original_height.value:
# Using the global variables for original dimensions
aspect_ratio = original_width.value / original_height.value
# Calculate the new height based on the new width while maintaining the original aspect ratio
new_height = int(current_width / aspect_ratio)
return current_width, new_height
else:
return current_width, current_height
def create_gradio_interface():
header = '<script async defer data-website-id="f5b8324e-09b2-4d56-8c1f-40a1f1457023" src="https://metrics.prodigle.dev/umami.js"></script>'
with gr.Blocks(head=header) as demo:
with gr.Row():
with gr.Column():
with gr.Row():
image_input = gr.Image(type="pil", label="Input Image")
folder_input = gr.File(label="Input Folder", file_count='directory')
with gr.Row():
new_width = gr.Number(label="Width", value=160)
new_height = gr.Number(label="Height", value=144)
keep_aspect_ratio = gr.Checkbox(label="Keep Aspect Ratio", value=False)
with gr.Row():
logo_resolution = gr.Button("Use Logo Resolution")
original_resolution = gr.Button("Use Original Resolution(Image)")
with gr.Row():
enable_color_limit = gr.Checkbox(label="Limit number of Colors", value=True)
number_of_colors = gr.Slider(label="Target Number of colors (32 max for GB Studio)", minimum=2, maximum=64, step=1, value=4)
limit_4_colors_per_tile = gr.Checkbox(label="Limit to 4 colors per tile, 8 palettes (For GB Studio development only)",
value=False, visible=True)
with gr.Group():
with gr.Row():
reduce_tile_checkbox = gr.Checkbox(label="Reduce to 192 unique 8x8 tiles (Not needed for LOGO scene mode)", value=False)
use_tile_variance_checkbox = gr.Checkbox(label="Sort by tile complexity (Complex tiles get saved first)", value=False)
reduce_tile_similarity_threshold = gr.Slider(label="Tile similarity threshold", minimum=0.3,
maximum=0.99, value=0.8, step=0.01, visible=False)
with gr.Row():
quantization_method = gr.Dropdown(choices=list(QUANTIZATION_METHODS.keys()),
label="Quantization Method", value="libimagequant")
dither_method = gr.Dropdown(choices=list(DITHER_METHODS.keys()), label="Dither Method",
value="None")
with gr.Group():
use_custom_palette = gr.Checkbox(label="Use Custom Color Palette", value=True)
palette_image = gr.Image(label="Color Palette Image", type="pil", visible=True,
value=os.path.join(os.path.dirname(__file__), "gb_palette.png"))
with gr.Group():
gr.Markdown("### Gothic Filter")
enable_gothic_filter = gr.Checkbox(label="Enable Gothic Filter", value=False)
brightness_threshold = gr.Slider(label="Brightness Threshold", minimum=0, maximum=255, value=0,
step=1)
dot_size = gr.Slider(label="Dot Size", minimum=0.25, maximum=6, value=1, step=0.25)
spacing = gr.Slider(label="Spacing", minimum=0, maximum=10, value=1, step=1)
contrast_boost = gr.Slider(label="Contrast Boost", minimum=1.0, maximum=2.0, value=1.5, step=0.1)
noise_factor = gr.Slider(label="Noise Factor", minimum=0, maximum=1, value=0.5, step=0.05)
edge_enhance = gr.Checkbox(label="Edge Enhancement", value=False)
apply_blur = gr.Checkbox(label="Apply Blur", value=False)
irregular_shape = gr.Checkbox(label="Irregular Dot Shape", value=False)
irregular_size = gr.Checkbox(label="Irregular Dot Size", value=False)
is_grayscale = gr.Checkbox(label="Convert to Grayscale", value=False)
with gr.Row():
is_black_and_white = gr.Checkbox(label="Convert to Black and White", value=False)
black_and_white_threshold = gr.Slider(label="Black and White Threshold", minimum=0, maximum=255,
value=128, visible=False)
                is_black_and_white.change(lambda x: gr.update(visible=x),
                                          inputs=[is_black_and_white], outputs=[black_and_white_threshold])
# Logic to capture and display original image dimensions
def capture_original_dimensions(image):
# Update the global variables with the dimensions of the uploaded image
if image is None:
return None
width, height = image.size
original_width.value = width
original_height.value = height
return image # Return unchanged image for further processing
def limit_4_colors_per_tile_change(x):
quantize_for_GBC.value = x
return quantize_for_GBC.value
limit_4_colors_per_tile.change(limit_4_colors_per_tile_change, inputs=[limit_4_colors_per_tile])
def on_use_tile_variance_click(x):
use_tile_variance.value = x
return x
use_tile_variance_checkbox.change(on_use_tile_variance_click, inputs=[use_tile_variance_checkbox])
image_input.change(
fn=capture_original_dimensions,
inputs=[image_input],
outputs=[image_input]
)
def on_logo_resolution_click():
# Return the values you want to update in the UI components
# No need to call .update() on individual components here, just return the new values
return False, 160, 144
# In the logo_resolution button click setup,
# Ensure you're mapping the outputs of the function to the correct UI elements
logo_resolution.click(
fn=on_logo_resolution_click,
outputs=[keep_aspect_ratio, new_width, new_height]
# The outputs should correspond to the UI components you want to update
)
def on_original_resolution_click():
return False, original_width.value, original_height.value
original_resolution.click(
fn=on_original_resolution_click,
outputs=[keep_aspect_ratio, new_width, new_height]
)
# Dynamic updates based on aspect ratio checkbox and width changes
keep_aspect_ratio.change(
fn=adjust_for_aspect_ratio,
inputs=[keep_aspect_ratio, new_width, new_height],
outputs=[new_width, new_height]
)
new_width.change(
fn=adjust_for_aspect_ratio,
inputs=[keep_aspect_ratio, new_width, new_height],
outputs=[new_width, new_height]
)
with gr.Column():
with gr.Group():
with gr.Row():
with gr.Column():
image_output = gr.Image(type="pil", label="Output Image", height=300)
with gr.Column():
image_output_no_palette = gr.Image(type="pil", label="Output Image (Natural Palette)",
height=300)
with gr.Row():
with gr.Column():
notice_text = gr.Text(value="No Warnings", lines=3, max_lines=3, autoscroll=False, interactive=False, label="Warnings", show_label=False)
with gr.Column():
kofi_html = gr.HTML(
"<a href='https://ko-fi.com/prodigle' target='_blank'><img height='36' style='border:0px; margin:auto; padding: 5px; width: 100%' src='https://cdn.ko-fi.com/cdn/kofi1.png?v=2' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>")
with gr.Row():
with gr.Column():
palette_text = gr.Textbox(label="Custom Palette Info", value="None", interactive=False,
show_copy_button=True, lines=4, max_lines=4, autoscroll=False)
with gr.Row():
execute_button = gr.Button("Convert Image")
execute_button_folder = gr.Button("Convert Folder")
image_output_zip = gr.File(label="Output Folder Zip", type="filepath")
        reduce_tile_checkbox.change(lambda x: gr.update(visible=x),
                                    inputs=[reduce_tile_checkbox], outputs=[reduce_tile_similarity_threshold])
        use_custom_palette.change(lambda x: gr.update(visible=x),
                                  inputs=[use_custom_palette], outputs=[palette_image])
def extract_tiles(image, tile_size=(8, 8)):
"""Extract 8x8 tiles from the image."""
tiles = []
for y in range(0, image.height, tile_size[1]):
for x in range(0, image.width, tile_size[0]):
box = (x, y, x + tile_size[0], y + tile_size[1])
tiles.append(image.crop(box))
return tiles
def generate_palette(tile, num_colors=4):
"""Generate a 4-color palette for an 8x8 tile using K-means clustering.
Args:
tile: The input tile as a PIL image or a NumPy array.
num_colors: The number of colors to include in the palette.
Returns:
A NumPy array representing the palette, with each color as a row.
"""
# Ensure the input is a NumPy array and has the correct shape
if not isinstance(tile, np.ndarray):
tile = np.array(tile)
if tile.shape[0] * tile.shape[1] < num_colors:
raise ValueError("Tile is too small for the number of colors requested")
# Reshape the tile data for K-means clustering
data = tile.reshape((-1, 3))
# Perform K-means clustering to find the dominant colors
kmeans = KMeans(n_clusters=num_colors, random_state=0).fit(data)
    palette = kmeans.cluster_centers_.round().astype(int)  # Round before casting to int for more accurate colors
return palette
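# Example (illustrative): extract a 4-color palette from one 8x8 RGB tile;
# the result is a (4, 3) array with one RGB row per cluster centre:
#   palette = generate_palette(np.array(tile), num_colors=4)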
def get_color_distribution(tile):
"""
Analyzes the tile and returns a frequency distribution of its colors.
"""
# Flatten the tile to a list of colors
data = tile.reshape(-1, 3)
# Count the frequency of each color
colors, counts = np.unique(data, axis=0, return_counts=True)
# Create a normalized distribution (frequency)
total = counts.sum()
distribution = {tuple(color): count / total for color, count in zip(colors, counts)}
return distribution
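# Example (illustrative): a tile that is half red and half blue yields
#   {(255, 0, 0): 0.5, (0, 0, 255): 0.5}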
from scipy.stats import wasserstein_distance
from sklearn.metrics import pairwise_distances
def find_best_matching_palette(tile_distribution, existing_palettes, adjacent_distributions=None,
balance_factor=0.5, key_color_weight=1):
"""
Enhanced palette matching considering color distribution, adjacent tiles, balance between local and global harmony, and key colors.
Args:
tile_distribution (dict): Color distribution of the current tile.
existing_palettes (list): List of available palettes to choose from.
adjacent_distributions (list): List of color distributions from adjacent tiles.
balance_factor (float): Balances between matching the tile's own colors and blending with adjacent tiles.
key_color_weight (float): Additional weight given to key colors to ensure their presence in the selected palette.
Returns:
int: Index of the best matching palette.
"""
best_score = float('inf')
best_palette_index = None
# Define key colors that need special attention (e.g., white, black, skin tones)
key_colors = [(255, 255, 255), (0, 0, 0)] # White and black
# Iterate through each candidate palette
for palette_index, palette in enumerate(existing_palettes):
palette_colors = np.array(palette)
tile_score = 0
# Calculate how well the palette matches the tile's own color distribution
for color, frequency in tile_distribution.items():
distances = pairwise_distances([color], palette_colors, metric='euclidean')[0]
closest_distance = np.min(distances)
# Apply additional weight to key colors
weight = key_color_weight if color in key_colors else 1
tile_score += frequency * closest_distance * weight
# Context-aware selection: consider adjacent tiles if available
context_score = 0
if adjacent_distributions:
for adj_dist in adjacent_distributions:
adj_score = 0
for adj_color, adj_freq in adj_dist.items():
distances = pairwise_distances([adj_color], palette_colors, metric='euclidean')[0]
closest_distance = np.min(distances)
# Apply additional weight to key colors in adjacent tiles
weight = key_color_weight if adj_color in key_colors else 1
adj_score += adj_freq * closest_distance * weight
context_score += adj_score # Accumulate context score from all adjacent tiles
# Average the context score based on the number of adjacent tiles considered
context_score /= len(adjacent_distributions)
# Combine tile score and context score using the balance factor
combined_score = (1 - balance_factor) * tile_score + balance_factor * context_score
# Update best palette if current one is better
if combined_score < best_score:
best_score = combined_score
best_palette_index = palette_index
return best_palette_index
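# Usage sketch (hypothetical data): pick the palette that best covers a
# tile's color histogram while softly blending with its neighbours:
#   idx = find_best_matching_palette(dist, palettes,
#                                    adjacent_distributions=neighbour_dists,
#                                    balance_factor=0.3)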
from skimage.color import deltaE_ciede2000
from skimage import color  # scikit-image color module
def apply_palette(tile, palette):
"""
Applies a palette to a tile using vectorized operations for efficiency.
"""
tile_array = np.array(tile)
palette_array = np.array(palette)
# Convert tile and palette to LAB color space
tile_lab = rgb2lab(tile_array)
palette_lab = rgb2lab(palette_array[np.newaxis, :, :])
# Expand dimensions for broadcasting
tile_lab_expanded = tile_lab[:, :, np.newaxis, :]
# Calculate color distances in a vectorized manner
distances = np.linalg.norm(tile_lab_expanded - palette_lab, axis=3)
# Find the index of the closest palette color for each pixel
closest_palette_indices = np.argmin(distances, axis=2)
# Map the tile to the new palette using advanced indexing
new_tile_array = palette_array[closest_palette_indices]
# Convert back to PIL Image
new_tile = Image.fromarray(np.uint8(new_tile_array), 'RGB')