# Reconstructed from a whitespace-mangled source: the original file had all of
# these statements collapsed onto one physical line, which (because of the
# leading '#') turned the whole section into a comment. Logic is unchanged.
#
# References used by the original author:
#   https://numpy.org/doc/stable/user/absolute_beginners.html
#   https://docs.python.org/3/tutorial/introduction.html#lists

from sklearn.datasets import fetch_olivetti_faces
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import VotingRegressor
from sklearn.ensemble import GradientBoostingRegressor
import sklearn
from sklearn.tree import export_text
from sklearn import tree
from matplotlib import image
from matplotlib import pyplot
from PIL import Image
import numpy
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
import glob  # glob finds all pathnames matching a Unix-shell pattern (arbitrary order)

# Load every COIL-100 image, convert to 8-bit grayscale ('L'), and flatten
# each 128x128 image into a 16384-element row vector.
filelist = glob.glob('coil-100/*.png')
data = numpy.array([numpy.array(Image.open(fname).convert('L')).flatten()
                    for fname in filelist])
print(data.shape, "shape db")  # expected: (7200, 16384)

# Keep a random ~10% subsample of the rows to speed up training.
# NOTE(review): no random seed is set, so the subsample differs on every run.
mask = numpy.random.choice([False, True], len(data), p=[0.90, 0.10])
data = data[mask]

# Load the image whose lower half we want to predict, as a grayscale pixel array.
# NOTE(review): assumes door3.jpg is exactly 128x128 pixels -- confirm; a
# different size would make the 8192-pixel slice below meaningless.
img = Image.open('door3.jpg').convert('L')
imgarr = numpy.array(img)
print(imgarr.shape)
# imgarrf = imgarr / 255  # normalization deliberately disabled; keep raw 0..255
imgarrf = imgarr

# Upper half of the flattened 128x128 image: 16384 / 2 = 8192 pixels.
tophalflady = imgarrf.flatten()[0:8192]
pyplot.imshow(tophalflady.reshape((64, 128)))  # top half: 64 rows x 128 columns
pyplot.show()

print(len(data))     # number of sampled rows
print(len(data[0]))  # 16384 columns = 128 x 128 flattened
n_pixels = data.shape[1]
print(n_pixels)      # should be 16384
# Reconstructed from a whitespace-mangled source line. Train a regressor that
# maps the upper half of a flattened image to its lower half, then use it to
# complete the top half of door3.jpg and display the stitched result.

# Upper half of each flattened image as features, lower half as targets.
# With even n_pixels (16384) the two slices split exactly at the middle.
X_train = data[:, : (n_pixels + 1) // 2]
y_target = data[:, n_pixels // 2:]

# Alternative regressors previously tried (kept for reference):
# clf = ExtraTreesRegressor(n_estimators=3, max_depth=48, max_features=96, random_state=0).fit(X_train, y_target)
# clf = RandomForestRegressor(n_estimators=10, max_depth=None).fit(X_train, y_target)
# clf = sklearn.neighbors.KNeighborsRegressor(n_neighbors=200, weights='distance', algorithm='brute').fit(X_train, y_target)
# clf = LinearRegression().fit(X_train, y_target)
# clf = RidgeCV().fit(X_train, y_target)
# clf = DecisionTreeRegressor(max_depth=30).fit(X_train, y_target)
clf = ExtraTreesRegressor().fit(X_train, y_target)
print('to fit')

print('to predict')
# sklearn expects a 2-D (n_samples, n_features) array; reshape the 1-D vector
# once instead of wrapping it in a Python list (which is deprecated input).
tophalflady = numpy.expand_dims(tophalflady, axis=0)  # (1, 8192)
arrpredict = clf.predict(tophalflady)                 # (1, 8192)
print(arrpredict.shape)   # predicted lower half, (1, 8192)
print(tophalflady.shape)  # known upper half, (1, 8192)

# Stitch the known top half and the predicted bottom half back together.
topbottom = numpy.concatenate((tophalflady, arrpredict), axis=1)
print(topbottom.shape, 'final shape')  # (1, 16384)

from numpy import asarray  # unused; retained from the original source

# Display the full reconstruction as a 128x128 grayscale image.
pyplot.imshow(topbottom.reshape((128, 128)))
pyplot.show()