LAB 1 (IMAGE CLASSIFICATION)


import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
import numpy as np
# Load CIFAR-10 dataset
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0  # Normalize
# Class names in CIFAR-10
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 
               'dog', 'frog', 'horse', 'ship', 'truck']
# Plot some sample images
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i])
    plt.xlabel(class_names[int(train_labels[i][0])])  # labels have shape (N, 1)
plt.show()
# Define CNN model
model = models.Sequential()
model.add(layers.Conv2D(32, (3,3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(64, (3,3), activation='relu'))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(64, (3,3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))  # raw logits; softmax is handled by the loss below
# Compile the model
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# Train the model
history = model.fit(train_images, train_labels, epochs=10, 
                    validation_data=(test_images, test_labels))
# Evaluate the model
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
# Plot training & validation accuracy values
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0, 1])
plt.legend(loc='lower right')
plt.show() 
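
As a quick check beyond the lab steps above, here is a minimal sketch (my addition, not part of the original lab) of classifying a single test image. The final Dense(10) layer outputs raw logits, so a softmax layer is appended before reading off the class.

# Sketch: predict one test image (appended softmax turns logits into probabilities)
probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()])
probs = probability_model.predict(test_images[:1])   # batch of one image
print("Predicted:", class_names[int(np.argmax(probs[0]))])
print("Actual   :", class_names[int(test_labels[0][0])])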



LAB 2 (GRADIENT DESCENT)


import numpy as np
def mse(y_true, y_pred):
    """Mean squared error between targets and predictions."""
    return np.mean((y_true - y_pred) ** 2)

def gradient(y_true, y_pred, X):
    """Gradient of the MSE loss with respect to the weight."""
    return -2 * np.dot(X.T, (y_true - y_pred)) / len(y_true)

def predict(X, w):
    """Linear prediction y_pred = X * w."""
    return X * w

def gradient_descent(X, y_true, w, learning_rate):
    """One descent step; returns (w_new, y_pred, error, grad)."""
    y_pred = predict(X, w)
    error = mse(y_true, y_pred)
    grad = gradient(y_true, y_pred, X)
    w_new = w - learning_rate * grad
    return w_new, y_pred, error, grad

X = np.array([2.0])       # Input feature as an array with one value.
y_true = np.array([7.0])  # True output as an array with one value.

w = np.array([0.2])
learning_rate = 0.01
print("Gradient Descent with single-element np.array for input and output:\n")
for i in range(10):
    w, y_pred, error, grad = gradient_descent(X, y_true, w, learning_rate)
    print(f"Iteration {i+1}:")
    print(f"  Predicted y       : {y_pred}")
    print(f"  Mean Squared Error: {error:.4f}")
    print(f"  Gradient          : {grad}")
    print(f"  Updated Weight    : {w}\n")


LAB 3 (CIFAR-10 CLASSIFICATION WITH PREDICTION VISUALIZATION)


import tensorflow as tf
from tensorflow.keras import layers, models
import matplotlib.pyplot as plt
import numpy as np

# Load CIFAR-10 dataset
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()

# Normalize image data
x_train, x_test = x_train / 255.0, x_test / 255.0

# Class names
class_names = ['Airplane', 'Automobile', 'Bird', 'Cat', 'Deer', 
               'Dog', 'Frog', 'Horse', 'Ship', 'Truck']

# Define CNN architecture
model = models.Sequential([
    layers.Conv2D(32, (3,3), activation='relu', input_shape=(32,32,3)),
    layers.MaxPooling2D((2,2)),

    layers.Conv2D(64, (3,3), activation='relu'),
    layers.MaxPooling2D((2,2)),

    layers.Conv2D(64, (3,3), activation='relu'),
    layers.Flatten(),

    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax')  
])

# Compile the model
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, epochs=10, validation_data=(x_test, y_test))

# Evaluate the model
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
print("\nTest Accuracy:", test_acc)

# Predict and visualize results
predictions = model.predict(x_test)

def display_predictions(index):
    plt.figure(figsize=(2,2))
    plt.imshow(x_test[index])
    plt.title(f"Predicted: {class_names[np.argmax(predictions[index])]} | Actual: {class_names[int(y_test[index])]}")
    plt.axis('off')
    plt.show()

# Show first 3 test images with predictions
for i in range(3):
    display_predictions(i)
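
Beyond the per-image display, a short sketch (my addition) recomputes overall test accuracy directly from the predictions array, as a cross-check against model.evaluate:

# Sketch: recompute test accuracy from the softmax predictions
pred_labels = np.argmax(predictions, axis=1)   # predicted class per test image
true_labels = y_test.flatten()                 # labels arrive as shape (N, 1)
print("Accuracy recomputed from predictions:", np.mean(pred_labels == true_labels))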

LAB 4 (RECOMMENDER SYSTEM FROM SALES DATA)

import pandas as pd
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Embedding, Flatten, Dense, Concatenate
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split

# Sample sales data (UserID, ProductID, Rating)
data = pd.DataFrame({
    'user_id': [1, 2, 3, 1, 2, 3, 4],
    'product_id': [101, 101, 101, 102, 102, 103, 104],
    'rating': [5, 4, 3, 2, 5, 4, 3]
})

# Encode user and product IDs
user_ids = data['user_id'].unique().tolist()
product_ids = data['product_id'].unique().tolist()
user2user_encoded = {x: i for i, x in enumerate(user_ids)}
product2product_encoded = {x: i for i, x in enumerate(product_ids)}
data['user'] = data['user_id'].map(user2user_encoded)
data['product'] = data['product_id'].map(product2product_encoded)

# Model parameters
num_users = len(user2user_encoded)
num_products = len(product2product_encoded)
embedding_size = 8

# Split data
X = data[['user', 'product']].values
y = data['rating'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Model architecture
user_input = Input(shape=(1,))
product_input = Input(shape=(1,))

user_embedding = Embedding(input_dim=num_users, output_dim=embedding_size)(user_input)
product_embedding = Embedding(input_dim=num_products, output_dim=embedding_size)(product_input)

user_vec = Flatten()(user_embedding)
product_vec = Flatten()(product_embedding)

concat = Concatenate()([user_vec, product_vec])
dense = Dense(64, activation='relu')(concat)
output = Dense(1)(dense)

model = Model([user_input, product_input], output)
model.compile(loss='mse', optimizer=Adam(0.001))

# Train model
model.fit([X_train[:, 0], X_train[:, 1]], y_train, 
          epochs=20, verbose=1, validation_split=0.2)

# Evaluate model
mse = model.evaluate([X_test[:, 0], X_test[:, 1]], y_test)
print(f'Mean Squared Error: {mse:.4f}')
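
To turn this model into actual recommendations, every product can be scored for one user and the results ranked. A minimal sketch (my addition), reusing the encoders defined above; user_id 1 is just an example:

# Sketch: rank all products for user_id 1 by predicted rating
user = user2user_encoded[1]                    # encoded index for user_id 1
all_products = np.arange(num_products)
user_col = np.full_like(all_products, user)
scores = model.predict([user_col, all_products]).flatten()
ranked = sorted(zip(product_ids, scores), key=lambda pair: -pair[1])
print("Predicted ratings for user 1:", ranked)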


LAB 5 (CNN MODEL FOR CLASSIFYING IMAGES)

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt

# Load CIFAR-10 dataset
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Normalize and one-hot encode
x_train, x_test = x_train/255.0, x_test/255.0
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

# Build CNN model
model = Sequential([
    Conv2D(32, (3,3), activation='relu', input_shape=(32,32,3)),
    MaxPooling2D((2,2)),
    Conv2D(64, (3,3), activation='relu'),
    MaxPooling2D((2,2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(10, activation='softmax')
])

# Compile and train
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, batch_size=64, validation_data=(x_test, y_test))

# Evaluate
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f"Test Accuracy: {test_acc:.4f}")

# Predict 10 test images
categories = ['Airplane','Automobile','Bird','Cat','Deer','Dog','Frog','Horse','Ship','Truck']
predictions = model.predict(x_test[:10])
pred_classes = predictions.argmax(axis=1)

# Show predictions
plt.figure(figsize=(10,5))
for i in range(10):
    plt.subplot(2,5,i+1)
    plt.imshow(x_test[i])
    plt.title(f"Pred: {categories[pred_classes[i]]}")
    plt.axis('off')
plt.tight_layout()
plt.show()
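
Since y_test was one-hot encoded above, the actual class for each image is recovered with argmax. A small sketch (my addition) printing predicted vs. actual for the same ten images:

# Sketch: compare predicted and actual classes for the first 10 test images
actual_classes = y_test[:10].argmax(axis=1)    # undo the one-hot encoding
for i in range(10):
    print(f"Image {i}: predicted {categories[pred_classes[i]]}, "
          f"actual {categories[actual_classes[i]]}")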



LAB 6 (SENTIMENT ANALYSIS)


import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, SimpleRNN, Dense
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Set parameters
vocab_size = 10000 # Use top 10,000 words
maxlen = 500 # Max length of review
embedding_dim = 32 # Embedding dimensions for each word
# Load the IMDB dataset
print("Loading IMDB dataset...")
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=vocab_size)
# Pad sequences to make all reviews the same length
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)
# Build the RNN model
print("Building the model...")
model = Sequential()
model.add(Embedding(input_dim=vocab_size, output_dim=embedding_dim,
                    input_length=maxlen))
model.add(SimpleRNN(units=64, activation='tanh'))
model.add(Dense(1, activation='sigmoid')) # Binary classification
# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
# Train the model
print("Training the model...")
model.fit(x_train, y_train, epochs=5, batch_size=64, validation_split=0.2)
# Evaluate the model
print("Evaluating the model...")
loss, accuracy = model.evaluate(x_test, y_test)
print(f"Test Accuracy: {accuracy:.4f}")
