IP Lab Programs
# NOTE(review): tail of a quadrant-display program — `quad2`..`quad4`, `quad1`,
# and the `plt` (matplotlib) import are defined earlier in the original
# notebook; that part was lost in extraction.
plt.subplot(1, 2, 2)
plt.imshow(quad2)
plt.title("2")
plt.axis("off")
# Second figure for the remaining two quadrants.
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.imshow(quad3)
plt.title("3")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(quad4)
plt.title("4")
plt.axis("off")
plt.show()
Output
Additional program
# Up-down split: display the top and bottom halves of an image side by side.
import cv2
import numpy as np
import matplotlib.pyplot as plt  # BUG FIX: plt was used below but never imported

# Read the image
# BUG FIX: `image_path` was never defined in this fragment (NameError);
# define it explicitly. NOTE(review): path taken from the matching pgm20
# later in this handout — confirm the intended file.
image_path = '/content/3.png'
img = cv2.imread(image_path)
# Get the height and width of the image
height, width = img.shape[:2]
up = img[:height//2, :]    # top half: first half of the rows, all columns
down = img[height//2:, :]  # bottom half
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.imshow(up)
plt.title("Up")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(down)
plt.title("down")
plt.axis("off")
plt.show()
# Left-right split: display the left and right halves of an image side by side.
import cv2
import numpy as np
# Read the image
img = cv2.imread('/content/3.PNG')
# BUG FIX: this fragment sliced top/bottom halves into `up`/`down` (with an
# undefined `height`) but then displayed undefined `left`/`right`. Compute
# the image dimensions and the left/right halves instead.
height, width = img.shape[:2]
left = img[:, :width//2]   # left half: all rows, first half of the columns
right = img[:, width//2:]  # right half
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.imshow(left)
plt.title("left")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(right)
plt.title("right")
plt.axis("off")
plt.show()
9. Read an image, then extract and display low-level features such as edges and
textures using filtering techniques.
import cv2
import numpy as np

# NOTE(review): `gray` must be a grayscale image loaded/converted earlier
# (e.g. cv2.imread(path, cv2.IMREAD_GRAYSCALE)) — that cell was lost in
# extraction; confirm against the full program.
# Edge detection
edges = cv2.Canny(gray, 100, 200)  # Use Canny edge detector
# Texture extraction
kernel = np.ones((5, 5), np.float32) / 25  # Define a 5x5 averaging kernel
# BUG FIX: the word "extraction" had been broken onto its own line by the
# extraction tool (a syntax error); folded back into this trailing comment.
texture = cv2.filter2D(gray, -1, kernel)  # Apply the averaging filter for texture extraction
Output
# NOTE(review): tail of a filtering demo — `filtering`, `sharpen`,
# `image_array` and `plt` are defined earlier in the original notebook.
plt.subplot(1, 2, 2)
plt.imshow(filtering(image_array, sharpen()),cmap='gray')
plt.title("Blurred Image")
plt.axis("off")
plt.show()
Extra programs:
1. #blur
import cv2
# Read the input image (replace 'your_image.jpg' with the actual image path)
image_path = '1.png'
image = cv2.imread(image_path)
# NOTE(review): duplicated read — likely an extraction/paste artifact.
image_path = '1.png'
image = cv2.imread(image_path)
# Apply thresholding (you can use other techniques like Sobel edges)
# NOTE(review): `gray` is presumably a grayscale version of `image`; its
# definition was lost in extraction — confirm against the full program.
_, binary_image = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
# Find contours
contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# Read the input image (replace 'your_image.jpg' with the actual image path)
image_path = 'face.jpeg'
image = cv2.imread(image_path)
# NOTE(review): `video_capture` (presumably a cv2.VideoCapture) is created
# earlier in the original program; the loop body below also lost its
# indentation during extraction.
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
# Perform face detection (you can use any pre-trained face detection model)
# For example, using Haar Cascade classifier:
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
'haarcascade_frontalface_default.xml')
faces = face_cascade.detectMultiScale(frame, scaleFactor=1.1, minNeighbors=5,
minSize=(30, 30))
cv2.destroyAllWindows()
#face detection with emotions
import cv2
from deepface import DeepFace
Extra programs in IP
1. import cv2
# Flip `image` horizontally (flag 1) and vertically (flag 0).
# NOTE(review): the second call overwrites the first result — the horizontal
# flip is lost before it is ever displayed or saved; likely two separate
# display cells in the original notebook.
flipped_image = cv2.flip(image,1 )
flipped_image = cv2.flip(image,0 )
2. # Thresholding
import cv2
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
ret,thresh_binary = cv2.threshold(gray_image,127,255,cv2.THRESH_BINARY)
ret,thresh_binary_inv =
cv2.threshold(gray_image,127,255,cv2.THRESH_BINARY_INV)
ret,thresh_trunc = cv2.threshold(gray_image,127,255,cv2.THRESH_TRUNC)
ret,thresh_tozero = cv2.threshold(gray_image,127,255,cv2.THRESH_TOZERO)
ret,thresh_tozero_inv =
cv2.threshold(gray_image,127,255,cv2.THRESH_TOZERO_INV)
for i in range(6):
plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')
plt.title(names[i])
plt.xticks([]),plt.yticks([])
plt.show()
output
# NOTE(review): duplicate of the filtering-demo display tail; `filtering`,
# `sharpen` and `image_array` come from an earlier cell.
plt.subplot(1, 2, 2)
plt.imshow(filtering(image_array, sharpen()),cmap='gray')
plt.title("Blurred Image")
plt.axis("off")
plt.show()
pgm3:
#Color image to Gray image
import numpy as np
import cv2
import matplotlib.pyplot as plt
def rgb2gray(image):
    """Convert an RGB image array to grayscale.

    Uses the ITU-R BT.601 luma weights (0.2989 R + 0.5870 G + 0.1140 B) on
    the first three channels; any alpha channel is ignored.

    NOTE(review): the weights assume channel order R, G, B — cv2.imread
    returns BGR, so convert (cv2.COLOR_BGR2RGB) before calling.
    """
    # BUG FIX: the function body had lost its indentation in extraction.
    return np.dot(image[..., :3], [0.2989, 0.5870, 0.1140])
filename = '1.png'  # NOTE(review): assigned but unused — the hard-coded path below is read instead
# BUG FIX: cv2.imread returns BGR, but both plt.imshow and the R/G/B luma
# weights in rgb2gray assume RGB; convert once up front so the displayed
# colours and the grayscale conversion are correct.
image = cv2.cvtColor(cv2.imread("/content/sample_data/JS pp photo.jpg"), cv2.COLOR_BGR2RGB)
image_array = np.array(image)
grayscale_image = rgb2gray(image_array)
print(image_array.shape)
# Show original and grayscale side by side.
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.imshow(image_array)
plt.title("Original Image")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(grayscale_image,cmap='gray')
plt.title("Grayscale Image")
plt.axis("off")
plt.show()
pgm4:
#Rotating an image
# NOTE(review): IMREAD_UNCHANGED can return a 4-channel BGRA array for PNGs;
# the input here is a .jpg, so 3 channels are expected — confirm.
filename = '/content/sample_data/JS pp photo.jpg'
image = cv2.imread(filename,cv2.IMREAD_UNCHANGED)
image_array = np.array(image)
def get_rotation(angle):
    """Return a 3x3 homogeneous rotation matrix for `angle` degrees.

    The matrix rotates 2-D points expressed as (x, y, 1) column vectors
    counter-clockwise about the origin.
    """
    # BUG FIX: the function body had lost its indentation in extraction.
    angle = np.radians(angle)
    return np.array([
        [np.cos(angle), -np.sin(angle), 0],
        [np.sin(angle), np.cos(angle), 0],
        [0, 0, 1],
    ])
img_transformed = np.zeros((400,400,3), dtype=np.uint8)
R1 = get_rotation(45)
# BUG FIX: the original loop computed `pixel_data` but never wrote it anywhere
# (and never used R1), so img_transformed stayed all-black. Forward-map each
# source pixel through the rotation and copy it when it lands on the 400x400
# canvas. NOTE(review): forward mapping leaves holes and clips pixels mapped
# to negative coordinates — inverse mapping would be the robust fix.
for i, row in enumerate(image_array):
    for j, col in enumerate(row):
        pixel_data = image_array[i, j, :]
        ti, tj, _ = R1 @ np.array([i, j, 1])
        ti, tj = int(round(ti)), int(round(tj))
        if 0 <= ti < 400 and 0 <= tj < 400:
            img_transformed[ti, tj, :] = pixel_data
plt.subplot(1, 2, 2)
plt.imshow(img_transformed)
plt.title("Rotated Image")
plt.axis("off")
plt.show()
cv2.imwrite('/content/sample_data/rotate.jpg',img_transformed)
pgm5:
import cv2
from google.colab.patches import cv2_imshow

# pgm5: load an image with its channels preserved (flag -1) and display it.
img = cv2.imread('/content/sample_data/Colors.jpg',-1)
cv2_imshow(img)
cv2.waitKey(0)
cv2.destroyAllWindows()

# prgm-8: split a colour image into its B, G and R planes and show the
# original followed by each single-channel plane.
image = cv2.imread('/content/sample_data/Rainbow.jpg',1)
B, G, R = cv2.split(image)
for plane in (image, B, G, R):
    cv2_imshow(plane)
    cv2.waitKey(0)
cv2.destroyAllWindows()
Pgm6:
import cv2
from google.colab.patches import cv2_imshow
# Blend a watermark image over a base image with cv2.addWeighted.
img1 = cv2.imread("/content/sample_data/do_not_copy.png")
#img1=cv2.imread("")
print(img1.shape)
img2 = cv2.imread("/content/sample_data/3.png")
# Resize so both operands of addWeighted have identical shapes.
img2 = cv2.resize(img2,(224,225))
print(img2.shape)
final_img = cv2.addWeighted(img2,1,img1,0.7,0)
cv2_imshow(final_img)
# NOTE(review): `image` here is whatever an earlier cell last loaded — not
# `final_img`; confirm which array was meant to be saved.
cv2.imwrite('/content/sample_data/rgbchannels.jpg',image)
# Load a grayscale test image for the filtering demo that follows.
filename = '/content/sample_data/smaple.jpg'
image = cv2.imread(filename,cv2.IMREAD_GRAYSCALE)
image_array = np.array(image)
def sharpen():
    """Return a 3x3 sharpening kernel (centre-heavy Laplacian variant).

    NOTE(review): the centre weight of 10 gives a kernel sum of 6, so the
    output is brightened as well as sharpened; the classic kernel uses a
    centre of 5 (sum 1) — confirm the intended effect.
    """
    # BUG FIX: the function body had lost its indentation in extraction.
    return np.array([
        [0, -1, 0],
        [-1, 10, -1],
        [0, -1, 0],
    ])
def filtering(image, kernel):
    """Correlate a 2-D grayscale `image` with a square `kernel`.

    Returns a new float array of shape (H - m + 1, W - m + 1) — "valid"
    correlation, no padding. Raises ValueError for a non-square kernel.
    """
    # BUG FIX: indentation restored (lost in extraction), and the non-square
    # case — which previously fell through to an unbound-variable NameError
    # at return — now fails loudly.
    m, n = kernel.shape
    if m != n:
        raise ValueError("kernel must be square")
    y, x = image.shape
    y = y - m + 1  # output shape = image shape - kernel shape + 1
    x = x - m + 1
    new_image = np.zeros((y, x))
    for i in range(y):
        for j in range(x):
            new_image[i][j] = np.sum(image[i:i+m, j:j+m] * kernel)
    return new_image
# Display the original and sharpened images
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.imshow(image_array, cmap='gray')
plt.title("Original Grayscale Image")
plt.axis("off")
plt.subplot(1, 2, 2)
# filtering() output is slightly smaller than the input (valid correlation).
plt.imshow(filtering(image_array, sharpen()),cmap='gray')
plt.title("Sharpened Image")
plt.axis("off")
plt.show()
pgm7:
# pgm7: 3x3 box (averaging) blur via a hand-rolled correlation.
img = cv2.imread("/content/sample_data/smaple.jpg",cv2.IMREAD_GRAYSCALE)
image_array = np.array(img)
print(image_array)

def sharpen():
    """Return a 3x3 box-blur (averaging) kernel.

    NOTE(review): the name is misleading — this kernel blurs rather than
    sharpens; kept for compatibility with the call below.
    """
    # BUG FIX: function bodies in this program had lost their indentation.
    return np.array([
        [1/9, 1/9, 1/9], [1/9, 1/9, 1/9], [1/9, 1/9, 1/9]
    ])

def filtering(image, kernel):
    """Correlate `image` with a square `kernel` ("valid" output size)."""
    m, n = kernel.shape
    if m != n:
        # BUG FIX: a non-square kernel previously fell through to an
        # unbound-variable NameError at return; fail loudly instead.
        raise ValueError("kernel must be square")
    y, x = image.shape
    y = y - m + 1  # output shape = image shape - kernel shape + 1
    x = x - m + 1
    new_image = np.zeros((y, x))
    for i in range(y):
        for j in range(x):
            new_image[i][j] = np.sum(image[i:i+m, j:j+m] * kernel)
    return new_image

# Display the original and blurred images
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.imshow(image_array,cmap='gray')
plt.title("Original Grayscale Image")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(filtering(image_array, sharpen()),cmap='gray')
plt.title("Blurred Image")
plt.axis("off")
plt.show()
pgm8:
#Guassian Blur
# pgm8: 3x3 Gaussian blur via a hand-rolled correlation.
img = cv2.imread("/content/sample_data/smaple.jpg",cv2.IMREAD_GRAYSCALE)
image_array = np.array(img)
print(image_array)

def sharpen():
    """Return a 3x3 Gaussian-blur kernel (weights sum to 1).

    NOTE(review): misleadingly named — it blurs; kept for compatibility.
    """
    # BUG FIX: function bodies in this program had lost their indentation.
    return np.array([
        [1/16, 2/16, 1/16], [2/16, 4/16, 2/16], [1/16, 2/16, 1/16]
    ])

def filtering(image, kernel):
    """Correlate `image` with a square `kernel` ("valid" output size)."""
    m, n = kernel.shape
    if m != n:
        # BUG FIX: non-square kernel previously hit an unbound-variable
        # NameError at return; fail loudly instead.
        raise ValueError("kernel must be square")
    y, x = image.shape
    y = y - m + 1  # output shape = image shape - kernel shape + 1
    x = x - m + 1
    new_image = np.zeros((y, x))
    for i in range(y):
        for j in range(x):
            new_image[i][j] = np.sum(image[i:i+m, j:j+m] * kernel)
    return new_image

# Display the original and blurred images
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.imshow(image_array,cmap='gray')
plt.title("Original Grayscale Image")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(filtering(image_array, sharpen()),cmap='gray')
plt.title("Guassian Blurred Image")
plt.axis("off")
plt.show()
pgm9:
#Ridge Detection
# pgm9: ridge detection via a hand-rolled correlation.
img = cv2.imread("/content/sample_data/JS pp photo.jpg",cv2.IMREAD_GRAYSCALE)
image_array = np.array(img)
print(image_array)

def sharpen():
    """Return the 3x3 ridge-detection kernel used by this demo.

    NOTE(review): misleadingly named, and the common ridge/Laplacian kernel
    has a positive centre (e.g. 4) rather than 0 — confirm the intended
    kernel; values kept as in the handout.
    """
    # BUG FIX: function bodies in this program had lost their indentation.
    return np.array([
        [0, -1, 0], [-1, 0, -1], [0, -1, 0]
    ])

def filtering(image, kernel):
    """Correlate `image` with a square `kernel` ("valid" output size)."""
    m, n = kernel.shape
    if m != n:
        # BUG FIX: non-square kernel previously hit an unbound-variable
        # NameError at return; fail loudly instead.
        raise ValueError("kernel must be square")
    y, x = image.shape
    y = y - m + 1  # output shape = image shape - kernel shape + 1
    x = x - m + 1
    new_image = np.zeros((y, x))
    for i in range(y):
        for j in range(x):
            new_image[i][j] = np.sum(image[i:i+m, j:j+m] * kernel)
    return new_image

# Display the original and filtered images
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.imshow(image_array,cmap='gray')
plt.title("Original Grayscale Image")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(filtering(image_array, sharpen()),cmap='gray')
plt.title("Ridge detection Image")
plt.axis("off")
plt.show()
pgm10:
#Edge Detection
# pgm10: edge detection with the 8-neighbour Laplacian kernel.
img = cv2.imread("/content/sample_data/JS pp photo.jpg",cv2.IMREAD_GRAYSCALE)
image_array = np.array(img)
print(image_array)

def sharpen():
    """Return the 3x3 edge-detection (8-neighbour Laplacian) kernel.

    NOTE(review): misleadingly named — it detects edges; kept for
    compatibility with the call below.
    """
    # BUG FIX: function bodies in this program had lost their indentation.
    return np.array([
        [-1, -1, -1], [-1, 8, -1], [-1, -1, -1]
    ])

def filtering(image, kernel):
    """Correlate `image` with a square `kernel` ("valid" output size)."""
    m, n = kernel.shape
    if m != n:
        # BUG FIX: non-square kernel previously hit an unbound-variable
        # NameError at return; fail loudly instead.
        raise ValueError("kernel must be square")
    y, x = image.shape
    y = y - m + 1  # output shape = image shape - kernel shape + 1
    x = x - m + 1
    new_image = np.zeros((y, x))
    for i in range(y):
        for j in range(x):
            new_image[i][j] = np.sum(image[i:i+m, j:j+m] * kernel)
    return new_image

# Display the original and filtered images
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.imshow(image_array,cmap='gray')
plt.title("Original Grayscale Image")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(filtering(image_array, sharpen()),cmap='gray')
plt.title("edge detection Image")
plt.axis("off")
plt.show()
pgm11:
# pgm11: split a colour image into its B, G and R planes and display the
# original followed by each plane.
import cv2
from google.colab.patches import cv2_imshow

image = cv2.imread('/content/3.png',1)
B, G, R = cv2.split(image)
# Corresponding channels are separated; show each in turn.
for plane in (image, B, G, R):
    cv2_imshow(plane)
    cv2.waitKey(0)
cv2.destroyAllWindows()
#Image resizing 1
# Image resizing 1: resize the second image to a fixed size, then blend the
# watermark over it with addWeighted.
import cv2
from google.colab.patches import cv2_imshow

img1 = cv2.imread("/content/sample_data/do_not_copy.png")
print(img1.shape)
img2 = cv2.resize(cv2.imread("/content/sample_data/3.png"), (224,225))
print(img2.shape)
# Weights: keep img2 at full strength, overlay img1 at 0.7.
final_img = cv2.addWeighted(img2, 1, img1, 0.7, 0)
cv2_imshow(final_img)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#Image resizing 2
# Image resizing 2: resize the square to match the circle's dimensions,
# then blend the two with addWeighted.
import cv2
from google.colab.patches import cv2_imshow

img1 = cv2.imread("/content/sample_data/circle.png")
#img1=cv2.imread("")
print(img1.shape)
img2 = cv2.imread("/content/sample_data/square.png")
# cv2.resize takes (width, height) = (cols, rows).
img2 = cv2.resize(img2, (img1.shape[1], img1.shape[0]))
print(img2.shape)
final_img = cv2.addWeighted(img1, 0.7, img2, 0.6, 0)
cv2_imshow(final_img)
#Image subtraction
import cv2
from google.colab.patches import cv2_imshow
img_1 = cv2.imread('/content/sample_data/square.png')
print(img_1.shape)
img_2 = cv2.imread('/content/sample_data/circle.png')
print(img_2.shape)
# Saturating per-pixel subtraction (clamps at 0, unlike numpy's wraparound).
final_img = cv2.subtract(img_2,img_1)
cv2_imshow(final_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Repeat after resizing the circle to the square's size.
# BUG FIX: the resize mixed `img1` (an unrelated image from an earlier cell)
# with `img_1`; use img_1 for both dimensions. Also dropped the grayscale
# flag (0) on the re-read — a single-channel img_2 cannot be subtracted from
# the 3-channel img_1.
img_2 = cv2.imread('/content/sample_data/circle.png')
img_2 = cv2.resize(img_2,(img_1.shape[1],img_1.shape[0]))
final_img = cv2.subtract(img_2,img_1)
cv2_imshow(final_img)
pgm12:
#Image translation
import cv2
import numpy as np
pgm13:
import cv2
import numpy as np
# NOTE(review): `translate_image` and `image` are defined in the lost part of
# the original program (presumably a cv2.warpAffine wrapper) — confirm.
# Translate the image by dx=20 pixels and dy=0 pixels, translate horizontally by 20px
translated_image = translate_image(image, dx=20, dy=0)
# Save the translated image to disk
cv2.imwrite('translated_image.png', translated_image)
pgm14:
#Image Zoom in
import cv2
import numpy as np
# Read the image
# NOTE(review): the leading double slash in the path is harmless on POSIX;
# the zoom logic itself was lost in extraction.
image = cv2.imread('//content/sample_data/circle.png')
pgm15:
pgm16:
pgm17:
# Five cv2.threshold variants applied to `gray_image`; `gray_image`,
# `images` and `names` come from earlier cells of the original notebook.
# BUG FIX: two assignments had been split across lines by the extraction
# tool (syntax errors) and the loop body had lost its indentation; repaired.
ret,thresh_binary = cv2.threshold(gray_image,127,255,cv2.THRESH_BINARY)
ret,thresh_binary_inv = cv2.threshold(gray_image,127,255,cv2.THRESH_BINARY_INV)
ret,thresh_trunc = cv2.threshold(gray_image,127,255,cv2.THRESH_TRUNC)
ret,thresh_tozero = cv2.threshold(gray_image,127,255,cv2.THRESH_TOZERO)
ret,thresh_tozero_inv = cv2.threshold(gray_image,127,255,cv2.THRESH_TOZERO_INV)
for i in range(6):
    plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')
    plt.title(names[i])
    plt.xticks([]),plt.yticks([])
plt.show()
pgm17:
#Zooming out of an image
import cv2
from google.colab.patches import cv2_imshow
# Read the input image
# NOTE(review): the zoom-out (downscale) logic was lost in extraction.
original_image = cv2.imread('/content/sample_data/3.png')
pgm18:
import cv2
import numpy as np
# Read the image
from google.colab.patches import cv2_imshow
img = cv2.imread('/content/3.png')
# BUG FIX: quad1..quad4 were written to disk without ever being defined
# (the slicing lines were lost in extraction); reconstruct the four
# quadrants: 1=top-left, 2=top-right, 3=bottom-left, 4=bottom-right.
height, width = img.shape[:2]
quad1 = img[:height//2, :width//2]
quad2 = img[:height//2, width//2:]
quad3 = img[height//2:, :width//2]
quad4 = img[height//2:, width//2:]
cv2.imwrite('quad1.png', quad1)
cv2.imwrite('quad2.png', quad2)
cv2.imwrite('quad3.png', quad3)
cv2.imwrite('quad4.png', quad4)
pgm19:
import cv2
import numpy as np
# Read the image
img = cv2.imread('/content/sample_data/3.png')
# BUG FIX: quad2..quad4 were displayed without being defined — the quadrant
# slicing (and the first figure showing quad1) was lost in extraction;
# reconstructed here.
height, width = img.shape[:2]
quad1 = img[:height//2, :width//2]
quad2 = img[:height//2, width//2:]
quad3 = img[height//2:, :width//2]
quad4 = img[height//2:, width//2:]
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.imshow(quad1)
plt.title("1")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(quad2)
plt.title("2")
plt.axis("off")
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.imshow(quad3)
plt.title("3")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(quad4)
plt.title("4")
plt.axis("off")
plt.show()
pgm20:
# Up- down
import cv2
import numpy as np
# Read the image
img = cv2.imread('/content/3.png')
# BUG FIX: `height` was never computed for this image (it would silently
# reuse a stale value from an earlier program); derive it from img.
height, width = img.shape[:2]
up = img[:height//2, :]    # top half
down = img[height//2:, :]  # bottom half
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.imshow(up)
plt.title("Up")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(down)
plt.title("down")
plt.axis("off")
plt.show()
pgm21:
# Up- down (stacked vertically this time: 2 rows, 1 column)
import cv2
import numpy as np
# Read the image
img = cv2.imread('/content/3.png')
# BUG FIX: `height` was never computed for this image (it would silently
# reuse a stale value from an earlier program); derive it from img.
height, width = img.shape[:2]
up = img[:height//2, :]    # top half
down = img[height//2:, :]  # bottom half
plt.figure(figsize=(10, 5))
plt.subplot(2, 1, 1)
plt.imshow(up)
plt.title("Up")
plt.axis("off")
plt.subplot(2, 1, 2)
plt.imshow(down)
plt.title("down")
plt.axis("off")
plt.show()
pgm21:
# left right: show the left and right halves stacked vertically.
import cv2
import numpy as np
# Read the image
img = cv2.imread('/content/sample_data/JS pp photo.jpg')
#height means all rows and
#width means all the columns
# Get the height and width of the image
height, width = img.shape[:2]
left = img[:, :width//2]
right = img[:, width//2:]
# BUG FIX: removed unused `up`/`down` slices — dead code copied over from
# the up/down demo; nothing below used them.
plt.figure(figsize=(10, 5))
plt.subplot(2, 1, 1)
plt.imshow(left)
plt.title("Left")
plt.axis("off")
plt.subplot(2, 1, 2)
plt.imshow(right)
plt.title("right")
plt.axis("off")
plt.show()
# NOTE(review): tail of another quadrant-display program — `quad2`..`quad4`
# are not defined by the preceding left/right program; their slicing (and the
# quad1 figure) was lost in extraction.
plt.subplot(1, 2, 2)
plt.imshow(quad2)
plt.title("2")
plt.axis("off")
plt.figure(figsize=(5, 10))
plt.subplot(1, 2, 1)
plt.imshow(quad3)
plt.title("3")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(quad4)
plt.title("4")
plt.axis("off")
plt.show()
pgm22:
import cv2
import numpy as np
image = cv2.imread('/content/sample_data/JS pp photo.jpg')