Posts

lab 7

from transformers import pipeline

# T5-small summarization pipeline; model and tokenizer share the checkpoint.
summarizer = pipeline("summarization", model="t5-small", tokenizer="t5-small")

passage = (
    "Machine learning is a subset of artificial intelligence that focuses on "
    "training algorithms to make predictions. It is widely used in industries "
    "like healthcare, finance, and retail."
)

# Greedy decoding (do_sample=False) keeps the summary deterministic.
outputs = summarizer(passage, max_length=30, min_length=10, do_sample=False)
summary = outputs[0]["summary_text"]
print("Summary:\n", summary)

lab 6

from transformers import pipeline

# Default checkpoint for the sentiment-analysis task.
sentiment = pipeline("sentiment-analysis")

sentences = [
    "I love this product! It works perfectly.",
    "This is the worst experience I've ever had.",
    "The weather is nice today.",
]

# Classify all sentences in one batched call, then report label + confidence.
results = sentiment(sentences)
for text, prediction in zip(sentences, results):
    print(f"{text} -> {prediction['label']} ({prediction['score']:.2f})")

lab 5

import gensim.downloader as api

# 50-dimensional GloVe vectors trained on Wikipedia + Gigaword.
model = api.load("glove-wiki-gigaword-50")


def make_paragraph(seed):
    """Build a short paragraph from the five nearest neighbours of *seed*.

    Returns a tuple ``(similar_words, paragraph)``.
    """
    neighbours = [word for word, _ in model.most_similar(seed, topn=5)]
    paragraph = (
        f"In the spirit of {seed}, one may explore {neighbours[0]}, "
        f"{neighbours[1]} and {neighbours[2]}. "
        f"Through {neighbours[3]} and {neighbours[4]}, every journey becomes memorable."
    )
    return neighbours, paragraph


seed_word = "adventure"
similar_words, paragraph = make_paragraph(seed_word)
print("Similar words:", similar_words)
print("\nParagraph:\n", paragraph)

lab 4

from transformers import pipeline
import gensim.downloader as api

# Embeddings used to find words related to the base topic.
wv = api.load("glove-wiki-gigaword-50")

base_word = "technology"
related = [word for word, _ in wv.most_similar(base_word, topn=3)]
print("Similar words:", related)

# Enrich the prompt by appending the related terms to the original question.
orig_prompt = "Explain the impact of technology on society."
enriched_prompt = orig_prompt + " Also discuss " + ", ".join(related) + "."

gen = pipeline("text-generation", model="gpt2")

original_text = gen(orig_prompt, max_length=80)[0]["generated_text"]
print("\nOriginal prompt response:\n", original_text)

enriched_text = gen(enriched_prompt, max_length=100)[0]["generated_text"]
print("\nEnriched prompt response:\n", enriched_text)

lab 3

import nltk
from nltk.tokenize import word_tokenize
from gensim.models import Word2Vec

# Tokenizer data required by word_tokenize.
nltk.download('punkt_tab')

corpus = [
    "A patient with diabetes requires insulin injections.",
    "Medical professionals recommend exercise for heart health.",
    "Doctors use MRI scans to diagnose brain disorders.",
    "Antibiotics help fight bacterial infections.",
    "A doctor specializes in diagnosing and treating diseases.",
]

# Lower-case and tokenize each sentence before training.
tokenised = [word_tokenize(sentence.lower()) for sentence in corpus]

# Skip-gram (sg=1) Word2Vec; min_count=1 keeps every word of this tiny corpus.
model = Word2Vec(tokenised, vector_size=50, window=3, min_count=1, sg=1)

print("Similar to 'doctor':", model.wv.most_similar("doctor", topn=5))

lab 2

  ! pip install gensim nltk transformers torch matplotlib scikit-learn import gensim.downloader as api import numpy as np from sklearn.decomposition import PCA import matplotlib.pyplot as plt model = api.load( "glove-wiki-gigaword-50" ) words = [ "computer" , "internet" , "software" , "hardware" , "data" ,           "robot" , "ai" , "network" , "cloud" , "algorithm" ] vecs = np.array([model[w] for w in words]) xy = PCA( 2 ).fit_transform(vecs)     plt.scatter(xy[:, 0 ], xy[:, 1 ]) for (x, y), w in zip (xy, words):     plt.text(x+ 0.01 , y+ 0.01 , w) plt.title( "PCA – Technology words" ) plt.xlabel( "PC1" ); plt.ylabel( "PC2" ) plt.show() word = "computer" print ( "Similar to" , word, ":" , model.most_similar(word, topn= 5 ))

lab 1

  ! pip install gensim nltk transformers torch matplotlib scikit-learn import gensim.downloader as api model = api.load( "glove-wiki-gigaword-50" ) w1, w2, w3 = "king" , "man" , "woman" vec = model[w1] - model[w2] + model[w3] word = model.most_similar([vec], topn= 1 )[ 0 ][ 0 ] print ( f " {w1} - {w2} + {w3} ≈ {word} " )