Posts

Dv 8

Snowflake (Experiment 8)
Server Name: DMJQJCZ-FT89804.snowflakecomputing.com
Warehouse: SNOWFLAKE_LEARNING_WH
Database: SNOWFLAKE_SAMPLE_DATA
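For reference, a minimal sketch of connecting to this account from Python with the snowflake-connector-python package. The account identifier is assumed to be the host name minus the .snowflakecomputing.com suffix, and the user and password are placeholders, not values from the lab.

import snowflake.connector  # pip install snowflake-connector-python

# Account identifier assumed from the server name above; credentials are placeholders.
conn = snowflake.connector.connect(
    account='DMJQJCZ-FT89804',
    user='YOUR_USER',          # placeholder
    password='YOUR_PASSWORD',  # placeholder
    warehouse='SNOWFLAKE_LEARNING_WH',
    database='SNOWFLAKE_SAMPLE_DATA',
)

# Sanity check: confirm the session picked up the warehouse and database
cur = conn.cursor()
cur.execute("SELECT CURRENT_WAREHOUSE(), CURRENT_DATABASE()")
print(cur.fetchone())
conn.close()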

Dv 1st

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Load the crime dataset
crime = pd.read_csv('crime.csv')
crime

# Line plot of Murder vs Assault
plt.plot(crime.Murder, crime.Assault)

# Scatter plots of Murder vs Assault
sns.scatterplot(data=crime, x='Murder', y='Assault');
sns.scatterplot(x=crime.Murder, y=crime.Assault, hue=crime.Murder, s=100);

plt.figure(figsize=(12, 6))
plt.title('Murder Vs Assault')
sns.scatterplot(x=crime.Murder, y=crime.Assault, hue=crime.Murder, s=100);

# Histogram and bar charts for Robbery
plt.title('Histogram for Robbery')
plt.hist(crime.Robbery);
plt.bar(crime.index, crime.Robbery);
sns.barplot(x='Robbery', y='Year', data=crime);
plt.show()

# Scatter plot of Population vs CarTheft
import numpy as np

data = pd.read_csv('crime.csv')
x = data.Population
y = data.CarTheft
plt.scatter(x, y)
plt.xlabel('Population')
plt.ylabel('CarTheft')
plt.title('Population Vs CarTheft')
plt.show()

6-10

Lab 6

import nltk

# Define the text
sentence = "I told the children I was going to tell them a story. They were excited"

# Tokenize the text
tokens = nltk.word_tokenize(sentence)

# Perform POS tagging
tags = nltk.pos_tag(tokens)

# Define a chunk grammar named mychunk
chunk_grammar = """
mychunk: {<NNS.?>*<PRP.?>*<VBD?>}"""

# Parse the grammar with the regular-expression parser
parser = nltk.RegexpParser(chunk_grammar)

# Assign the chunk
tree = parser.parse(tags)

# Print the tree instead of drawing it
print(tree)

Lab 7

!pip install nltk
import nltk
from nltk import CFG
from nltk.parse import ChartParser

cnf_grammar = CFG.fromstring("""
S -> NP VP
VP -> V NP | VP PP
PP -> P NP
V -> 'saw' | 'ate' | 'walked'
NP -> 'John' | 'Mary' | 'Bob' | Det N | NP PP
Det -> 'a' | 'an' | 'the'
N -> 'man' | 'dog' | 'cat' | ...
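The grammar string above is cut off. A sketch of how Lab 7 might continue, assuming the grammar is closed off as in the classic NLTK book example; the P rule and the sample sentence are assumptions, not part of the original lab.

# Assumed completion of the truncated grammar (the P rule is an assumption)
cnf_grammar = CFG.fromstring("""
S -> NP VP
VP -> V NP | VP PP
PP -> P NP
V -> 'saw' | 'ate' | 'walked'
NP -> 'John' | 'Mary' | 'Bob' | Det N | NP PP
Det -> 'a' | 'an' | 'the'
N -> 'man' | 'dog' | 'cat'
P -> 'in' | 'with'
""")

# Parse a sample sentence with the chart parser and print every parse tree
parser = ChartParser(cnf_grammar)
for tree in parser.parse(['John', 'saw', 'a', 'dog']):
    print(tree)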

1-5

Lab 1

import nltk
nltk.download('punkt')
nltk.download('punkt_tab')
from nltk.tokenize import sent_tokenize

def tokenize_sentences(text):
    sentences = sent_tokenize(text)
    return sentences

text = "NLTK is a leading platform for building Python programs to work with human language data. It provides easy-to-use interfaces to over 50 corpora and lexical resources such as WordNet, along with a suite of text processing libraries for classification, tokenization, stemming, tagging, parsing, and semantic reasoning, wrappers for industrial-strength NLP libraries, and an active discussion forum."

sentences = tokenize_sentences(text)
for i, sentence in enumerate(sentences):
    print(f"Sentence {i+1}: {sentence}")

import nltk
from nltk.tokenize import word_tokenize
word_tokenize('won’t')

import nltk
nltk.download('punkt')
from nltk.tokenize import word_tokenize

def tokenize_words(text):
    words = word_tokenize(text) ...
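The word-tokenization function above is truncated; a minimal assumed completion and usage line (the return statement and sample sentence are assumptions):

# Assumed completion of the truncated tokenize_words function
def tokenize_words(text):
    words = word_tokenize(text)
    return words

print(tokenize_words("NLTK makes tokenization straightforward."))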
LAB 1 (IMAGE CLASSIFICATION)

import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
import numpy as np

# Load CIFAR-10 dataset
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0  # Normalize

# Class names in CIFAR-10
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

# Plot some sample images
plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i])
    plt.xlabel(class_names[int(train_labels[i])])
plt.show()

# Define CNN model
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add...
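The model definition above is truncated at the second model.add. For context, a hedged sketch of how the remaining layers, compilation, and training typically look in the stock Keras CIFAR-10 tutorial; the lab's actual layers past the first Conv2D are not shown here, so this continuation is an assumption.

# Assumed continuation, following the standard TensorFlow CIFAR-10 tutorial architecture
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))  # one logit per CIFAR-10 class

# Compile with a loss that expects raw logits, then train
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

history = model.fit(train_images, train_labels, epochs=10,
                    validation_data=(test_images, test_labels))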

5

import nltk
from nltk.tokenize import word_tokenize

nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')

def pos_tagging(text):
    words = word_tokenize(text)
    tagged_words = nltk.pos_tag(words)
    return tagged_words

text = "NLTK is a leading platform for building Python programs to work with human language data."
tagged_text = pos_tagging(text)
print(tagged_text)

4

import nltk
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer

nltk.download('punkt')
nltk.download('wordnet')

def lemmatize_text(text):
    lemmatizer = WordNetLemmatizer()
    tokens = word_tokenize(text)
    lemmatized_text = ' '.join([lemmatizer.lemmatize(word) for word in tokens])
    return lemmatized_text

text = "The cats are chasing mice and playing in the garden"
lemmatized_text = lemmatize_text(text)
print("Original Text:", text)
print("Lemmatized Text:", lemmatized_text)
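Note that WordNetLemmatizer.lemmatize treats every word as a noun by default, so verb forms like "chasing" and "playing" pass through unchanged unless a part of speech is supplied. A small illustration:

from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()
print(lemmatizer.lemmatize('chasing'))           # 'chasing' (noun assumed by default)
print(lemmatizer.lemmatize('chasing', pos='v'))  # 'chase'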