# Then you will need to duplicate the codes (from reading image and
# corresponding label file to patch) for three images. Give them
# different names but put them in the same folder.

# read train images (rasterio returns arrays shaped (bands, height, width))
with rasterio.open("path2img/img1.tif") as src:
    img1 = src.read()
with rasterio.open("path2img/img2.tif") as src:
    img2 = src.read()

# read train masks / labels
with rasterio.open("path2label/label1.tif") as src:
    lbl1 = src.read()
with rasterio.open("path2label/label2.tif") as src:
    lbl2 = src.read()

# change order of the array so the channel axis is last: (bands, H, W) -> (H, W, bands)
img1 = np.transpose(img1, (1, 2, 0))
img2 = np.transpose(img2, (1, 2, 0))
lbl1 = np.transpose(lbl1, (1, 2, 0))
lbl2 = np.transpose(lbl2, (1, 2, 0))

# define criteria
PATCH_SIZE = 256
# BUG FIX: band counts must be taken AFTER the transpose. Before it,
# shape[-1] is the image WIDTH (rasterio layout is (bands, H, W)), which
# would give patchify a wrong patch window.
N_BANDS_S1 = img1.shape[-1]
N_BANDS_LBL = lbl1.shape[-1]
print(N_BANDS_S1)
print(N_BANDS_LBL)

print(img1.shape)
print(img2.shape)
print(lbl1.shape)
print(lbl2.shape)


def _save_patches(image, n_bands, out_dir, prefix):
    """Cut `image` (H, W, bands) into non-overlapping PATCH_SIZE tiles and
    write each tile to <out_dir>/<prefix>_<row>_<col>.tif."""
    patches = patchify(image, (PATCH_SIZE, PATCH_SIZE, n_bands), step=PATCH_SIZE)
    os.makedirs(out_dir, exist_ok=True)
    for i in range(patches.shape[0]):
        for j in range(patches.shape[1]):
            # patchify returns (ni, nj, 1, ph, pw, bands); index the
            # singleton window axis away so a clean (ph, pw, bands) tile
            # is written.
            single_patch_img = patches[i, j, 0]
            tifffile.imwrite(
                os.path.join(out_dir, f"{prefix}_{i}_{j}.tif"),
                single_patch_img,
            )


# create and save image patches
_save_patches(img1, N_BANDS_S1, "OUTPUTFOLDER", "img1")
_save_patches(img2, N_BANDS_S1, "OUTPUTFOLDER", "img2")

# do the same for the labels.
# BUG FIX: the original loop indexed an undefined `patches_img00`, wrote
# into an uncreated 'msk/' folder instead of the 'OUTPUTFOLDER_LBL' it had
# just created, and never saved the patches of lbl2 at all.
_save_patches(lbl1, N_BANDS_LBL, "OUTPUTFOLDER_LBL", "lbl1")
_save_patches(lbl2, N_BANDS_LBL, "OUTPUTFOLDER_LBL", "lbl2")

# TODO (from original notes): sort; make array based on geotiff patches;
# store each as npy
Hi, to train a convolutional neural network model for semantic segmentation/pixel-based classification, you need area-based samples (e.g., an image patch and its corresponding label patch, both with the same x/y dimensions). These could be a remote sensing image and its corresponding reference map/raster, with each pixel representing the target class — hence the name "label," short for "labeled dataset" (also called an "annotated dataset" or "mask"). While in my example I used a publicly available oil palm map as the label ru-vid.com/video/%D0%B2%D0%B8%D0%B4%D0%B5%D0%BE-fEVgEP2VyWM.html, it is common to digitize the target features (e.g., buildings, a specific land cover, or multiple land covers, depending on the purpose) on-screen and transform them into a "label" raster. Another way to generate labels without digitizing: ru-vid.com/video/%D0%B2%D0%B8%D0%B4%D0%B5%D0%BE-YHA_-QMB8_U.html. You still need to assign the class attribute afterwards.