import string

import pandas as pd
from nltk.corpus import stopwords  # run nltk.download('stopwords') once if the list is not installed
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report, confusion_matrix
messages = pd.read_csv('smsspamcollection/SMSSpamCollection', sep='\t',
                       names=['label', 'message'])
messages['length'] = messages['message'].apply(len)
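# Optional: a quick look at the data before modelling -- the first few rows
# and how message length is distributed for each label.
print(messages.head())
print(messages.groupby('label')['length'].describe())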
def text_process(mess):
    '''
    Takes in a string of text, then performs the following:
    1. Removes all punctuation
    2. Removes all stopwords
    3. Returns a list of the cleaned text
    '''
    # Check characters to see if they are in punctuation
    nopunc = [char for char in mess if char not in string.punctuation]

    # Join the characters again to form the string
    nopunc = ''.join(nopunc)

    # Now just remove any stopwords
    return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]
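# A minimal sketch of what text_process produces on a made-up message
# (the exact tokens kept depend on NLTK's English stopword list):
print(text_process("Free entry in a weekly competition!"))
# roughly: ['Free', 'entry', 'weekly', 'competition']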
msg_train, msg_test, label_train, label_test = train_test_split(messages['message'], messages['label'])
pipeline = Pipeline([
    ('bow', CountVectorizer(analyzer=text_process)),  # tokenise with text_process, build bag-of-words counts
    ('tfidf', TfidfTransformer()),                     # re-weight the counts with TF-IDF
    ('classifier', MultinomialNB()),                   # Naive Bayes classifier on the TF-IDF vectors
])
pipeline.fit(msg_train, label_train)

predictions = pipeline.predict(msg_test)
print(classification_report(label_test, predictions))
print(confusion_matrix(label_test, predictions))
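# A minimal sketch of scoring new, unseen messages with the fitted pipeline
# (the example strings below are invented purely for illustration):
new_messages = [
    "WINNER!! Claim your free prize by calling now",
    "Are we still meeting for lunch tomorrow?",
]
print(pipeline.predict(new_messages))  # e.g. ['spam' 'ham']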