ANN Practical
Group B - 2. Write a python program to illustrate ART neural network.
class ART1:
    def __init__(self, threshold=0.7):
        self.threshold = threshold
        self.cluster = []

    def match(self, pattern, cluster):
        if sum(pattern) == 0:
            return False
        match_score = sum(map(min, pattern, cluster)) / sum(pattern)
        return match_score >= self.threshold

    def train(self, data):
        for pattern in data:
            for i, cluster in enumerate(self.cluster):
                if self.match(pattern, cluster):
                    self.cluster[i] = [min(p, c) for p, c in zip(pattern, cluster)]
                    break
            else:
                self.cluster.append(pattern.copy())

    def show_cluster(self):
        for i, cluster in enumerate(self.cluster):
            print(f"cluster {i+1}:{cluster}")

n = int(input("enter number of patterns: "))
size = int(input("enter pattern size: "))
data = []
for i in range(n):
    pattern = list(map(int, input(f"enter pattern {i+1} (space-separated bits): ").split()))
    data.append(pattern)
model=ART1(threshold=0.7)
model.train(data)
model.show_cluster()
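A sample run (assuming the user enters 4 patterns of size 4, say 1 0 1 0, 1 1 1 0, 0 0 1 1 and 0 1 1 1): with the vigilance threshold at 0.7, none of the patterns overlaps enough with an earlier cluster, so each pattern opens its own cluster and the program prints
cluster 1:[1, 0, 1, 0]
cluster 2:[1, 1, 1, 0]
cluster 3:[0, 0, 1, 1]
cluster 4:[0, 1, 1, 1]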
Group A - 1. Write a Python program to plot a few activation functions that are being used in neural networks.
import numpy as np
import matplotlib.pyplot as plt
# Define activation functions
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def tanh(x):
    return np.tanh(x)

def relu(x):
    return np.maximum(0, x)

def leaky_relu(x, alpha=0.01):
    return np.where(x > 0, x, x * alpha)

def elu(x, alpha=1.0):
    return np.where(x >= 0, x, alpha * (np.exp(x) - 1))
# Input range
x = np.linspace(-10, 10, 400)
# Compute function values
activations = {
"Sigmoid": sigmoid(x),
"Tanh": tanh(x),
"ReLU": relu(x),
"Leaky ReLU": leaky_relu(x),
"ELU": elu(x)
}
# Plot in subplots
plt.figure(figsize=(12, 10))
for i, (name, y) in enumerate(activations.items(), start=1):
    plt.subplot(3, 2, i)
    plt.plot(x, y, label=name, color='teal', linewidth=2)
    plt.title(name)
    plt.xlabel("Input")
    plt.ylabel("Output")
    plt.grid(True)
    plt.axhline(0, color='black', linewidth=0.5)
    plt.axvline(0, color='black', linewidth=0.5)
    plt.legend()
plt.tight_layout()
plt.suptitle("Neural Network Activation Functions", fontsize=16, y=1.02)
plt.show()
Group A - 2. Generate the ANDNOT function using a McCulloch-Pitts neural net in a Python program.
def mc_andnot(a, b):
    # Weights for A and B
    w1 = 1   # weight for A
    w2 = -1  # inhibitory weight for B (models NOT B)
    threshold = 1
    # Net input
    net_input = a * w1 + b * w2
    # Step activation function
    output = 1 if net_input >= threshold else 0
    return output

# Test the function for all input combinations
print("A B | A AND NOT B")
for a in [0, 1]:
    for b in [0, 1]:
        result = mc_andnot(a, b)
        print(f"{a} {b} | {result}")
Group A - 3. Write a Python program using a Perceptron neural network to recognise even and odd numbers. The given numbers are in ASCII form, 0 to 9.
import numpy as np
step = lambda x: 1 if x >= 0 else 0
# ASCII codes of '0'..'9' (48..57) as 6-bit patterns; label 1 = even, 0 = odd
data = [
    ([1,1,0,0,0,0], 1), ([1,1,0,0,0,1], 0), ([1,1,0,0,1,0], 1),
    ([1,1,0,0,1,1], 0), ([1,1,0,1,0,0], 1), ([1,1,0,1,0,1], 0),
    ([1,1,0,1,1,0], 1), ([1,1,0,1,1,1], 0), ([1,1,1,0,0,0], 1),
    ([1,1,1,0,0,1], 0)
]
w = np.zeros(6)
for x, y in data:
    x = np.array(x)
    w += (y - step(np.dot(x, w))) * x
j = int(input("Enter number (0-9): "))
# Convert the digit to its ASCII code and take the low 6 bits, matching the training data
xj = np.array([int(b) for b in format(ord(str(j)), '06b')])
print(f"{j} is", "even" if step(np.dot(xj, w)) else "odd")
Group A - 4. With a suitable example, demonstrate the perceptron learning law with its decision regions using Python. Give the output in graphical form.
import numpy as np
import matplotlib.pyplot as plt
# AND gate training data
X = np.array([
[0, 0],
[0, 1],
[1, 0],
[1, 1]
])
# Output labels for AND gate
y = np.array([0, 0, 0, 1])
# Initialize weights and bias
weights = np.zeros(2)
bias = 0
learning_rate = 0.1
epochs = 10
# Perceptron training rule
for epoch in range(epochs):
    for i in range(len(X)):
        linear_output = np.dot(X[i], weights) + bias
        prediction = 1 if linear_output >= 0 else 0
        error = y[i] - prediction
        weights += learning_rate * error * X[i]
        bias += learning_rate * error
# Print final weights and bias
print("Final Weights:", weights)
print("Final Bias:", bias)
# Plot decision boundary
def plot_decision_boundary():
    x_min, x_max = -0.5, 1.5
    y_min, y_max = -0.5, 1.5
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 200),
                         np.linspace(y_min, y_max, 200))
    grid = np.c_[xx.ravel(), yy.ravel()]
    z = np.dot(grid, weights) + bias
    zz = z.reshape(xx.shape)
    plt.contourf(xx, yy, zz >= 0, alpha=0.4, cmap=plt.cm.coolwarm)
    plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', cmap=plt.cm.coolwarm, s=100)
    plt.title("Perceptron Decision Region for AND Gate")
    plt.xlabel("Input 1")
    plt.ylabel("Input 2")
    plt.grid(True)
    plt.show()
plot_decision_boundary()
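As a quick check (a small optional addition, not part of the original listing), the learned weights can be verified against the AND truth table before plotting. Because the AND data is linearly separable, the decision region in the plot should contain only the point (1, 1).
# Optional: verify the learned AND truth table with the final weights and bias
for xi, ti in zip(X, y):
    pred = 1 if np.dot(xi, weights) + bias >= 0 else 0
    print(xi, "->", pred, "(target:", ti, ")")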
Group A - 5. Write a Python program for Bidirectional Associative Memory with two pairs of vectors.
import numpy as np
def train_bam(A, B):
    return np.dot(A.T, B)

def recall_bam(A, W):
    return np.sign(np.dot(A, W))
A=np.array([[1, -1, 1], [-1, 1, -1]]) # Input Patterns
B=np.array([[1, 1, -1], [-1, -1, 1]]) # Output patterns
W=train_bam(A,B)
print("\n Weighted pattern")
print(W)
print("\nRecall pattern")
A_test=np.array([[1, -1, 1], [-1, 1, -1]])
B_recalled=recall_bam(A_test,W)
print(B_recalled)
print("\n Reverse Recall pattern")
B_test=np.array([[1, 1, -1], [-1, -1, 1]])
A_recalled=recall_bam(B_test,W)
print(A_recalled)
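For the two pattern pairs stored here, the Hebbian weight matrix W = A1ᵀB1 + A2ᵀB2 works out to
[[ 2  2 -2]
 [-2 -2  2]
 [ 2  2 -2]]
and both directions recall the stored pairs exactly: the forward pass returns the B patterns and the reverse pass (through Wᵀ) returns the A patterns.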
Group B - 1. Write a Python program to show a Back-Propagation network for the XOR function with binary input and output.
import numpy as np
# Sigmoid activation and its derivative
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(output):
    return output * (1 - output)
# XOR input and output (binary)
X = np.array([
[0, 0],
[0, 1],
[1, 0],
[1, 1]
])
y = np.array([
[0],
[1],
[1],
[0]
])
# Seed for reproducibility
np.random.seed(42)
# Initialize weights and biases
input_neurons = 2
hidden_neurons = 2
output_neurons = 1
weights_input_hidden = np.random.uniform(size=(input_neurons, hidden_neurons))
weights_hidden_output = np.random.uniform(size=(hidden_neurons, output_neurons))
bias_hidden = np.random.uniform(size=(1, hidden_neurons))
bias_output = np.random.uniform(size=(1, output_neurons))
# Training settings
epochs = 10000
learning_rate = 0.1
# Training loop
for epoch in range(epochs):
    # FORWARD PROPAGATION
    hidden_input = np.dot(X, weights_input_hidden) + bias_hidden
    hidden_output = sigmoid(hidden_input)
    final_input = np.dot(hidden_output, weights_hidden_output) + bias_output
    final_output = sigmoid(final_input)

    # ERROR
    error = y - final_output

    # BACKPROPAGATION
    d_output = error * sigmoid_derivative(final_output)
    error_hidden = d_output.dot(weights_hidden_output.T)
    d_hidden = error_hidden * sigmoid_derivative(hidden_output)

    # UPDATE weights and biases
    weights_hidden_output += hidden_output.T.dot(d_output) * learning_rate
    bias_output += np.sum(d_output, axis=0, keepdims=True) * learning_rate
    weights_input_hidden += X.T.dot(d_hidden) * learning_rate
    bias_hidden += np.sum(d_hidden, axis=0, keepdims=True) * learning_rate

    # Print loss every 1000 epochs
    if epoch % 1000 == 0:
        loss = np.mean(np.square(error))
        print(f"Epoch {epoch} - Loss: {loss:.4f}")
# Final Output
print("\nFinal predictions (rounded):")
print(np.round(final_output, 2))
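With this seed and learning rate the loss should fall steadily and the rounded predictions approach 0, 1, 1, 0 for the four XOR inputs; if training stalls on a flat region (which can happen with only two hidden neurons), increasing the learning rate or the number of epochs usually helps.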
Group B - 2. Write a python program to illustrate ART neural network.
import numpy as np
class ART1:
    def __init__(self, input_size, vigilance=0.75):
        self.vigilance = vigilance
        self.weights = []
        self.input_size = input_size

    def _match(self, x, w):
        intersection = np.minimum(x, w)
        return np.sum(intersection) / np.sum(x) >= self.vigilance

    def train(self, data):
        for x in data:
            for i, w in enumerate(self.weights):
                if self._match(x, w):
                    self.weights[i] = np.minimum(self.weights[i], x)
                    break
            else:
                self.weights.append(x.copy())

    def show_clusters(self):
        for i, w in enumerate(self.weights):
            print(f"Cluster {i+1}: {w}")
# Example usage
data = np.array([
[1, 0, 1, 0],
[1, 1, 1, 0],
[0, 0, 1, 1],
[0, 1, 1, 1]
])
model = ART1(input_size=4, vigilance=0.6)
model.train(data)
model.show_clusters()
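With vigilance 0.6, the first two patterns share enough active bits to merge into one cluster, as do the last two, so the program should print
Cluster 1: [1 0 1 0]
Cluster 2: [0 0 1 1]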
Group B - 3. Write a Python program for creating a Back-Propagation feed-forward neural network.
import numpy as np
# Sigmoid activation function and its derivative
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1 - x)
# XOR Input and Output
X = np.array([
[0, 0],
[0, 1],
[1, 0],
[1, 1]
])
y = np.array([
[0],
[1],
[1],
[0]
])
# Neural Network class
class NeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size):
        # Initialize layer sizes
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        # Random weight initialization
        self.weights_input_hidden = np.random.uniform(-1, 1, (input_size, hidden_size))
        self.weights_hidden_output = np.random.uniform(-1, 1, (hidden_size, output_size))
        # Bias initialization
        self.bias_hidden = np.zeros((1, hidden_size))
        self.bias_output = np.zeros((1, output_size))

    def forward(self, X):
        # Forward propagation
        self.hidden_input = np.dot(X, self.weights_input_hidden) + self.bias_hidden
        self.hidden_output = sigmoid(self.hidden_input)
        self.final_input = np.dot(self.hidden_output, self.weights_hidden_output) + self.bias_output
        self.final_output = sigmoid(self.final_input)
        return self.final_output

    def backward(self, X, y, learning_rate=0.1):
        # Backpropagation: compute output error
        error = y - self.final_output
        d_output = error * sigmoid_derivative(self.final_output)
        # Error in hidden layer
        error_hidden = d_output.dot(self.weights_hidden_output.T)
        d_hidden = error_hidden * sigmoid_derivative(self.hidden_output)
        # Update weights and biases
        self.weights_hidden_output += self.hidden_output.T.dot(d_output) * learning_rate
        self.bias_output += np.sum(d_output, axis=0, keepdims=True) * learning_rate
        self.weights_input_hidden += X.T.dot(d_hidden) * learning_rate
        self.bias_hidden += np.sum(d_hidden, axis=0, keepdims=True) * learning_rate

    def train(self, X, y, epochs=10000, learning_rate=0.1):
        for epoch in range(epochs):
            # Forward pass
            self.forward(X)
            # Backward pass
            self.backward(X, y, learning_rate)
            if epoch % 1000 == 0:
                loss = np.mean(np.square(y - self.final_output))  # Mean Squared Error
                print(f'Epoch {epoch} - Loss: {loss:.4f}')

    def predict(self, X):
        return np.round(self.forward(X))  # Round the output to 0 or 1
# Initialize Neural Network with 2 input neurons, 2 hidden neurons, and 1 output neuron
nn = NeuralNetwork(input_size=2, hidden_size=2, output_size=1)
# Train the neural network on XOR data
nn.train(X, y, epochs=10000, learning_rate=0.1)
# Test the trained neural network
print("\nPredictions after training:")
predictions = nn.predict(X)
print(predictions)
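Because the weights are initialised randomly (no seed is set here), an occasional run may settle in a poor local minimum and misclassify one XOR case; re-running the script, or seeding NumPy with np.random.seed(42) as in the previous program, gives reproducible results.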
Group B - 5. Write a Python program to implement CNN object detection. Discuss numerous performance evaluation metrics for evaluating the performance of object detection algorithms.
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.preprocessing import image
import matplotlib.pyplot as plt
# Example data - For demonstration, using random images and labels
def generate_synthetic_data(num_samples=100):
    # Random data for illustration (in practice, use a real dataset such as COCO or Pascal VOC)
    images = np.random.random((num_samples, 128, 128, 3))  # 128x128 RGB images
    labels = np.random.randint(0, 2, (num_samples, 5))     # 5 values per image (class + 4 bounding box coords)
    return images, labels
# Define the CNN model for object detection
def create_model(input_shape=(128, 128, 3)):
    model = models.Sequential([
        layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(64, (3, 3), activation='relu'),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(128, (3, 3), activation='relu'),
        layers.MaxPooling2D((2, 2)),
        layers.Flatten(),
        layers.Dense(128, activation='relu'),
        layers.Dense(5)  # 1 for class + 4 for bounding box coordinates
    ])
    model.compile(optimizer='adam', loss='mean_squared_error')
    return model
# Load data
X, y = generate_synthetic_data(50) # Example of 50 samples (in practice, use a real dataset)
# Create model
model = create_model()
# Train the model
model.fit(X, y, epochs=10, batch_size=4)
# Make predictions on a sample
predictions = model.predict(X[:5])
# Display predictions (Bounding boxes and class)
for i in range(5):
    plt.imshow(X[i])
    plt.title(f"Predicted: {predictions[i]}")
    plt.show()
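The assignment also asks about performance evaluation metrics. Object detectors are usually judged by Intersection over Union (IoU) between predicted and ground-truth boxes, by precision and recall at a chosen IoU threshold (a prediction counts as a true positive when its IoU exceeds, say, 0.5), and by mean Average Precision (mAP), the mean over classes of the area under the precision-recall curve. A minimal sketch of the IoU computation (boxes given as x_min, y_min, x_max, y_max; the helper name iou is ours, not part of any library):
def iou(box_a, box_b):
    # Boxes are (x_min, y_min, x_max, y_max)
    x1, y1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    x2, y2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)  # overlap area
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter)   # IoU in [0, 1]

print(iou((10, 10, 50, 50), (30, 30, 70, 70)))  # approx. 0.1429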
Group C - 1. How to train a neural network with TensorFlow/PyTorch, and evaluation of logistic regression using TensorFlow.
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
# Load and prepare the data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Reshape and normalize the data
X_train = X_train.reshape((X_train.shape[0], 28, 28, 1)) # Add the channel dimension (1 for grayscale)
X_test = X_test.reshape((X_test.shape[0], 28, 28, 1))
X_train, X_test = X_train / 255.0, X_test / 255.0 # Normalize pixel values to be between 0 and 1
# One-hot encode the labels
y_train = to_categorical(y_train, 10) # 10 classes (digits 0-9)
y_test = to_categorical(y_test, 10)
# Build the Neural Network Model
model = models.Sequential([
layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.Flatten(),
layers.Dense(64, activation='relu'),
layers.Dense(10, activation='softmax') # Output layer (10 classes)
])
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model
model.fit(X_train, y_train, epochs=5, batch_size=64, validation_data=(X_test, y_test))
# Evaluate the model
test_loss, test_accuracy = model.evaluate(X_test, y_test)
print(f'Test accuracy: {test_accuracy:.4f}')
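The second part of the heading asks for an evaluation of logistic regression with TensorFlow. Multinomial logistic regression is just a single softmax layer on the flattened pixels, so a minimal sketch (reusing the preprocessed X_train/y_train from above) could be:
logreg = models.Sequential([
    layers.Flatten(input_shape=(28, 28, 1)),
    layers.Dense(10, activation='softmax')  # softmax regression = multinomial logistic regression
])
logreg.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
logreg.fit(X_train, y_train, epochs=5, batch_size=64, validation_data=(X_test, y_test))
lr_loss, lr_acc = logreg.evaluate(X_test, y_test)
print(f'Logistic regression test accuracy: {lr_acc:.4f}')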
Group C - 2. TensorFlow/PyTorch implementation of a CNN.
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
# Load MNIST dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Reshape the data to add channel dimension (1 for grayscale images)
X_train = X_train.reshape((X_train.shape[0], 28, 28, 1))
X_test = X_test.reshape((X_test.shape[0], 28, 28, 1))
# Normalize the pixel values to be between 0 and 1
X_train, X_test = X_train / 255.0, X_test / 255.0
# One-hot encode the labels
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
# Define the CNN model
model = models.Sequential([
layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.Flatten(),
layers.Dense(64, activation='relu'),
layers.Dense(10, activation='softmax') # Output layer for 10 classes (digits 0-9)
])
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model
model.fit(X_train, y_train, epochs=5, batch_size=64, validation_data=(X_test, y_test))
# Evaluate the model
test_loss, test_accuracy = model.evaluate(X_test, y_test)
print(f'Test accuracy: {test_accuracy:.4f}')
Group C - 4. MNIST Handwritten Character Detection using PyTorch, Keras and TensorFlow.
# For TensorFlow
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
# Load and preprocess the MNIST dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0
X_test = X_test.reshape(-1, 28, 28, 1).astype('float32') / 255.0
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
# Build the CNN model using the tf.keras Sequential API
model = tf.keras.Sequential([
layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.Flatten(),
layers.Dense(64, activation='relu'),
layers.Dense(10, activation='softmax')
])
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model
model.fit(X_train, y_train, epochs=5, batch_size=64, validation_data=(X_test, y_test))
# Evaluate the model
test_loss, test_accuracy = model.evaluate(X_test, y_test)
print(f'Test accuracy: {test_accuracy:.4f}')
# For Keras
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
# Load the MNIST dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Reshape and normalize the data
X_train = X_train.reshape((X_train.shape[0], 28, 28, 1)) # Add the channel dimension (1 for grayscale)
X_test = X_test.reshape((X_test.shape[0], 28, 28, 1))
X_train, X_test = X_train / 255.0, X_test / 255.0 # Normalize pixel values to be between 0 and 1
# One-hot encode the labels
y_train = to_categorical(y_train, 10) # 10 classes (digits 0-9)
y_test = to_categorical(y_test, 10)
# Build the CNN model
model = models.Sequential([
layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.Flatten(),
layers.Dense(64, activation='relu'),
layers.Dense(10, activation='softmax') # Output layer (10 classes)
])
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model
model.fit(X_train, y_train, epochs=5, batch_size=64, validation_data=(X_test, y_test))
# Evaluate the model
test_loss, test_accuracy = model.evaluate(X_test, y_test)
print(f'Test accuracy: {test_accuracy:.4f}')
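A PyTorch version is not included above; a minimal sketch of an equivalent CNN (assuming torch and torchvision are installed, and using torchvision's MNIST loader) might look like this:
# For PyTorch (a minimal sketch, not part of the original listing)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

transform = transforms.ToTensor()  # converts images to [0, 1] tensors of shape (1, 28, 28)
train_ds = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_ds = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
train_loader = DataLoader(train_ds, batch_size=64, shuffle=True)
test_loader = DataLoader(test_ds, batch_size=256)

class CNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, 3)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.fc1 = nn.Linear(64 * 5 * 5, 64)
        self.fc2 = nn.Linear(64, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)  # 28 -> 26 -> 13
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)  # 13 -> 11 -> 5
        x = x.flatten(1)
        x = F.relu(self.fc1(x))
        return self.fc2(x)  # raw logits; CrossEntropyLoss applies softmax internally

model = CNN()
optimizer = torch.optim.Adam(model.parameters())
criterion = nn.CrossEntropyLoss()

# Train the model
for epoch in range(5):
    model.train()
    for images, labels in train_loader:
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()
    print(f'Epoch {epoch + 1} finished, last batch loss: {loss.item():.4f}')

# Evaluate the model
model.eval()
correct = 0
with torch.no_grad():
    for images, labels in test_loader:
        correct += (model(images).argmax(1) == labels).sum().item()
print(f'Test accuracy: {correct / len(test_ds):.4f}')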