# Heart-disease classification with logistic regression (script 1).
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv('heart.csv')
print(df.isnull().sum())                    # inspect missing values per column
df = df.fillna(df.median())                 # impute numeric NaNs with column medians
df = df.astype({'oldpeak': 'int', 'thalach': 'int'})

X = df.drop('target', axis='columns')
Y = df['target']                            # same column as df.iloc[:, -1], but explicit

# BUG FIX: the original called np.ravel(Y_train) BEFORE the split created
# Y_train (NameError). Split first, then flatten the training labels.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
Y_train = np.ravel(Y_train)

reg = LogisticRegression()
reg.fit(X_train, Y_train)

Y_pred = reg.predict(X_test)
print(Y_pred)

# BUG FIX: the original computed accuracy_score(Y_test, Y_test) — comparing
# the truth to itself, which always prints 1.0.
print(accuracy_score(Y_test, Y_pred))
print(classification_report(Y_test, Y_pred))
print(confusion_matrix(Y_test, Y_pred))
sns.heatmap(confusion_matrix(Y_test, Y_pred), annot=True)

# Exploratory plots of age vs cholesterol.
x = df['age']
y = df['chol']
plt.scatter(x, y)
plt.show()
plt.bar(x, y)
plt.show()

 
# ============================== Assignment 1 (variant B) ==============================
 
# Heart-disease classification with logistic regression (script 2).
# Fixes vs the original: leading-space IndentationError on the first import,
# the "import numpy as np f / rom sklearn..." line split mid-word, and two
# pairs of statements fused onto single lines (SyntaxError).
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv("Heart.csv")
print(df)
# BUG FIX: to_string is a method — the original printed the bound method object.
print(df.to_string())
print(df.shape)
print(df.dtypes)
# BUG FIX: astype returns a copy; the original discarded the result.
df = df.astype({'age': 'float'})
df.dropna(inplace=True)

# Impute any remaining NaN in 'restecg' with the column mean
# (assign back instead of chained inplace fillna, which is deprecated).
x = df["restecg"].mean()
df["restecg"] = df["restecg"].fillna(x)
print(df)

df.drop_duplicates(inplace=True)
X = df.drop('target', axis='columns')
Y = df['target']

X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print(X_train.shape)
print(Y_train.shape)

Reg = LogisticRegression()
Reg.fit(X_train, Y_train)

ypred = Reg.predict(X_test)
print(ypred)

print(accuracy_score(Y_test, ypred))
print(classification_report(Y_test, ypred))
print(confusion_matrix(Y_test, ypred))
sns.heatmap(confusion_matrix(Y_test, ypred), annot=True)

# BUG FIX: the original plotted the literal lists ['age'] and ['target'];
# plot the actual DataFrame columns instead.
plt.scatter(df['age'], df['target'])
plt.show()

# ============================== Assignment 1 (variant C) ==============================

 

 

# Heart-disease classification with logistic regression (script 3).
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt

# Raw string: in "D:\heart.csv" the "\h" happens to pass through, but any
# path starting with \n, \t, etc. would silently corrupt — keep it literal.
df = pd.read_csv(r"D:\heart.csv")
print(df)

print(df.isna())
print(df.isnull().sum())

a = df.duplicated().sum()
print(a)

# Impute numeric NaNs with the per-column median.
df1 = df.fillna(df.median())
print(df1)
print(df1.to_string())

# BUG FIX: drop_duplicates() returns a new frame; the original discarded it.
df1 = df1.drop_duplicates()

df = df1.astype({"chol": "int", "trestbps": "int", "oldpeak": "int"})
print(df)

X = df.drop("target", axis="columns")
print(X)
Y = df["target"]
print(Y)

X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25)
print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)

Reg = LogisticRegression()
Reg.fit(X_train, Y_train)

Y_predict = Reg.predict(X_test)
print(Y_predict.shape)

print(accuracy_score(Y_test, Y_predict))
print(classification_report(Y_test, Y_predict))
print(confusion_matrix(Y_test, Y_predict))

sns.heatmap(confusion_matrix(Y_test, Y_predict), annot=True)
# ============================== Assignment 2 ==============================

 

 

# Simple linear regression: predict weight from height.
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics

# Raw string keeps the Windows backslashes literal.
df = pd.read_csv(r"D:\weight-height (1)(1).csv")
print(df)

X = df.iloc[:, 1:2]   # feature column, kept 2-D for sklearn
Y = df.iloc[:, 2]     # target column
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
Reg = LinearRegression()
Reg.fit(X_train, Y_train)

Y_predict = Reg.predict(X_test)
print(Reg.coef_)        # fitted slope
print(Reg.intercept_)   # fitted intercept

plt.scatter(X_test, Y_test)
plt.plot(X_test, Y_predict, color="red")

# BUG FIX: the printed labels were misspelled ("meansqaureerror").
print('mean squared error', metrics.mean_squared_error(Y_test, Y_predict))
print("mean absolute error", metrics.mean_absolute_error(Y_test, Y_predict))

Rsquare = Reg.score(X_train, Y_train)   # R^2 on the training split
print(Rsquare)

 

 

 

 

 

# ============================== Assignment 3 ==============================

 

 
# Decision-tree classifier on the Admission_Predict dataset, plus
# cross-validated depth selection.
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score

# NOTE: "pip install graphviz" is a shell command, not Python — the original
# line was a SyntaxError. Run it in a terminal once if tree export is needed.

df = pd.read_csv(r"D:\Admission_Predict .csv", sep=',')
print(df.columns)
print(df.shape)
df.columns = df.columns.str.rstrip()   # dataset headers have trailing spaces
print(df.columns)
# BUG FIX: the original did `df = df.isnull().sum()`, replacing the whole
# DataFrame with a Series of null counts and breaking every later step.
print(df.isnull().sum())

# Binarize the target: admit chance >= 0.80 -> 1, else 0.
df.loc[df['Chance of Admit'] >= 0.80, 'Chance of Admit'] = 1
df.loc[df['Chance of Admit'] < 0.80, 'Chance of Admit'] = 0
print(df['Chance of Admit'])
print(df)

df = df.drop('Serial No.', axis=1)   # row number carries no signal
print(df)

X = df.iloc[:, 0:7].values
Y = df.iloc[:, 7].values
print(Y)

from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=0)
print(X_train.shape, end=' ')
print(X_test.shape)

model = DecisionTreeClassifier(criterion='entropy', max_depth=2)
model.fit(X_train, Y_train)
Y_Pred = model.predict(X_test)
matrix = confusion_matrix(Y_test, Y_Pred, labels=[0.0, 1.0])
print(matrix)

acc = accuracy_score(Y_test, Y_Pred)
print('Accuracy of Decision Tree Model = ', acc)

from sklearn.metrics import classification_report
cr = classification_report(Y_test, Y_Pred)
print('Classification Report ', cr)

feature_names = df.columns[0:7]
print(feature_names, end=' ')
class_names = [str(x) for x in model.classes_]
print(class_names)

from sklearn.tree import plot_tree
fig = plt.figure(figsize=(50, 30))
plot_tree(model, feature_names=feature_names, class_names=class_names, filled=True)
plt.savefig('tree_visualization.png')

# Cross-validate tree depth 1..10 with stratified 5-fold CV.
# (The unused `import graphviz` from the original was dropped.)
from sklearn import tree
sf = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
for d in range(1, 11):
    score = cross_val_score(
        tree.DecisionTreeClassifier(criterion='entropy', max_depth=d, random_state=0),
        X_train, Y_train, cv=sf, scoring='accuracy')
    print("Average score for depth {} is {} :".format(d, score.mean()))

 

 

 

# ============================== Assignment 5 ==============================

 

# Small fully-connected Keras network on the Pima Indians diabetes dataset.
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense

df = pd.read_csv('pima-indians-diabetes.csv')
# BUG FIX: `df.info` without parentheses only references the bound method;
# call it so the summary is actually printed.
df.info()
print(df.columns)

X = df.iloc[:, 0:-1].values   # all feature columns
y = df.iloc[:, 8].values      # outcome column

# Binary classifier: 8 inputs -> 12 -> 8 -> 1 (sigmoid output).
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X, y, epochs=150, batch_size=10)
# evaluate() returns [loss, accuracy]; we keep only the accuracy.
_, accuracy = model.evaluate(X, y)
print('Accuracy: %.2f' % (accuracy * 100))

# Optional visualisation of the network topology.
from ann_visualizer.visualize import ann_viz
ann_viz(model, title="My First Neural Network")

 

 

 
# Stray leftover from another notebook cell: `score` is not defined in this
# scope, so calling it here would raise NameError. Commented out.
# score.mean()

 

 

 

 

 

 

// ============================== MongoDB — Section 1 ==============================

 

// Find and modify a document and return the updated one.
// Matches the first post titled "Updated Document", atomically increments
// its `likes` by 10, and (because of new: true) returns the post AFTER the
// update rather than the pre-update snapshot.
const updatedDocument = db.posts.findAndModify({
query: { title: "Updated Document" },
update: { $inc: { likes: 10 } },
new: true, // Return the modified document
});

 

// BUG FIX: the original opened the name with a smart quote (“Teacher_info").
db.createCollection("Teacher_info")
// Capped collection: fixed 20 KB; oldest documents are overwritten first.
db.createCollection("audit", {capped: true, size: 20480})

 

// Insert sample teacher documents. Fixes vs the original:
//  - smart quotes (“ ”) replaced with ASCII quotes throughout
//  - the first insert was missing its closing ")"
db.Teacher_info.insert({ Teacher_id: "Pic001", Teacher_Name: "Ravi", Dept_Name: "IT", Sal: 30000, status: "A" })
db.Teacher_info.insert({ Teacher_id: "Pic002", Teacher_Name: "Ravi", Dept_Name: "IT", Sal: 20000, status: "A" })
db.Teacher_info.insert({ Teacher_id: "Pic003", Teacher_Name: "Akshay", Dept_Name: "Comp", Sal: 25000, status: "N" })

 

// Updates and removes on Teacher_info. Fixes vs the original:
//  - `db. Teacher_info` (space after the dot) is invalid shell syntax
//  - MongoDB field names are case-sensitive: the documents store `Sal` and
//    `Dept_Name`, but the original queried `sal` / set `Dept_name` (no-ops)
//  - removed "pic001" could never match the inserted "Pic001"
//  - smart quotes replaced with ASCII quotes
db.Teacher_info.update({ Sal: { $gt: 25000 } }, { $set: { Dept_Name: "ETC" } }, { multi: true })
db.Teacher_info.update({ status: "A" }, { $inc: { Sal: 10000 } }, { multi: true })
db.Teacher_info.remove({ Teacher_id: "Pic001" });
db.Teacher_info.remove({})   // deletes every document in the collection

 

// Add a join_date field (set to now) to every document in the collection.
db.Teacher_info.update( { }, { $set: { join_date: new Date() } }, { multi: true} )

// Drop the whole Teacher_info collection (documents and indexes).
db.Teacher_info.drop()

 

db.Teacher.find()

// Field names are case-sensitive: the documents store `Sal`, not `sal`
// (the original `sal` queries matched nothing).
db.Teacher_info.find({ Sal: 25000 })

// Teachers that are active OR earn exactly 50000.
db.Teacher_info.find({ $or: [ { status: "A" }, { Sal: 50000 } ] })

// BUG FIX: `db. Teacher_info` (space after the dot) is invalid.
db.Teacher_info.find({ Sal: { $gt: 40000 } })

// Projection: return matching media released after 2000, excluding `Cast`.
db.media.find({ Released: { $gt: 2000 } }, { "Cast": 0 })

 

// BUG FIX: the original line was missing its closing ")"; shell prompt
// markers (">") dropped so the commands can be pasted as-is.
db.media.find({ Released: { $gte: 1999 } }, { "Cast": 0 })

// Exclude one field from a result set.
// BUG FIX: method names are case-sensitive — `Find` must be `find`.
db.records.find({ "user_id": { $lt: 42 } }, { history: 0 })

// Return two fields and the _id field (1 = include; _id included by default).
db.records.find({ "user_id": { $lt: 42 } }, { "name": 1, "email": 1 })

// Return two fields and exclude _id.
db.records.find({ "user_id": { $lt: 42 } }, { "_id": 0, "name": 1, "email": 1 })

// BUG FIX: `db..find()` named no collection; using `records` as above.
db.records.find().pretty()

 

// Active teachers, highest salary first (field is `Sal`, not `sal`;
// `db. Teacher_info` space and shell ">" prompts removed).
db.Teacher_info.find({ status: "A" }).sort({ Sal: -1 })

// Last 10 documents in natural (insertion) order from the capped `audit`
// collection — i.e. the 10 most recent entries.
db.audit.find().sort({ $natural: -1 }).limit(10)

db.Employee.find().sort({ _id: -1 })   // newest first (ObjectIds are time-ordered)

db.Employee.find().sort({ _id: 1 })    // oldest first

db.Employee.find().skip(3).pretty()    // skip the first 3 results

 

 

 

// ============================== MongoDB — Section 2 ==============================

 

 
// Seed the products collection.
// BUG FIX: the original call was missing its closing "])".
db.products.insertMany([
  { product: "Apple", category: "Fruits", price: 2.5, quantity: 10 },
  { product: "Milk", category: "Dairy", price: 1.5, quantity: 5 },
  { product: "Bread", category: "Bakery", price: 2, quantity: 8 },
  { product: "Chicken", category: "Meat", price: 5, quantity: 3 },
  { product: "Tomato", category: "Vegetables", price: 1, quantity: 12 },
  { product: "Eggs", category: "Dairy", price: 3, quantity: 15 },
  { product: "Rice", category: "Grains", price: 4, quantity: 6 },
  { product: "Watermelon", category: "Fruits", price: 6, quantity: 1 },
  { product: "Butter", category: "Dairy", price: 2.5, quantity: 4 },
  { product: "Salmon", category: "Seafood", price: 8, quantity: 2 },
  { product: "Cheese", category: "Dairy", price: 3.5, quantity: 7 },
  { product: "Yogurt", category: "Dairy", price: 2, quantity: 4 },
  { product: "Ice Cream", category: "Dairy", price: 4.5, quantity: 3 }
])

 

// Map each sale to (product, revenue) where revenue = quantity * price.
var mapFunction = function() {
emit(this.product, this.quantity * this.price);
};

// Reduce: sum all revenue values emitted for the same product key.
var reduceFunction = function(key, values) { return Array.sum(values); };

// BUG FIX: the original placed the "//" comment BEFORE the closing "}",
// commenting the brace out and breaking the options object.
db.sales.mapReduce(
  mapFunction,
  reduceFunction,
  { out: "product_total_revenue" } // store the results in a new collection
)

 

// Average price per product.
// BUG FIX: a $group accumulator needs an output field name — the original
// `{ _id: "$product", { $avg: "$price" } }` was invalid syntax.
db.sales.aggregate([ { $group: { _id: "$product", avgPrice: { $avg: "$price" } } } ]);
 

 

 

 

 

 

 

screenshot20231120082556.png

 

 

 

 

screenshot20231120082634.png

 


Created: 20/11/2023 22:46:39
Page views: 59
CREATE NEW PAGE