Commit f51158c1 by Febby Simanjuntak

stbi

{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline\n",
"# import sys\n",
"# reload(sys)\n",
"# sys.setdefaultencoding('utf-8')\n",
"import re\n",
"import csv\n",
"import string\n",
"import random\n",
"import pandas as pd\n",
"import numpy as np\n",
"from sklearn import metrics\n",
"from sklearn.svm import SVC"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"#\n",
"#\n",
"#\n",
"file=open ('fradulent_emails.txt','r')"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<_io.TextIOWrapper name='fradulent_emails.txt' mode='r' encoding='cp1252'>"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"file"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline\n",
"# import sys\n",
"# reload(sys)\n",
"# sys.setdefaultencoding('utf-8')\n",
"import re\n",
"import csv\n",
"import nltk\n",
"import string\n",
"import random\n",
"import pandas as pd\n",
"import numpy as np\n",
"from nltk.corpus import stopwords\n",
"from sklearn import metrics\n",
"from sklearn.svm import SVC"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"file=open ('fradulent_emails.txt')"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<_io.TextIOWrapper name='fradulent_emails.txt' mode='r' encoding='cp1252'>"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"file"
]
},
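{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of actually reading the corpus (added for illustration; the original only opens the handle). The explicit `latin-1` encoding is an assumption to sidestep decode errors, since the repr above shows only the platform default `cp1252`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sketch: read the whole corpus into one string.\n",
"# encoding='latin-1' is an assumption, not taken from the original notebook.\n",
"with open('fradulent_emails.txt', 'r', encoding='latin-1') as f:\n",
"    corpus = f.read()\n",
"print(len(corpus))"
]
},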
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Tokenization"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def tokenize(row):\n",
" if row is None or row is '':\n",
" tokens = \"\"\n",
" else:\n",
" tokens = str(row).split(\" \")[:maxtokens]\n",
" return tokens"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Regular expressions to remove unnecessary characters"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"import re\n",
"\n",
"def reg_expressions(row):\n",
" tokens = []\n",
" try:\n",
" for token in row:\n",
" token = token.lower() # make all characters lower case\n",
" token = re.sub(r'[\\W\\d]', \"\", token)\n",
" token = token[:maxtokenlen] # truncate token\n",
" tokens.append(token)\n",
" except:\n",
" token = \"\"\n",
" tokens.append(token)\n",
" return tokens"
]
},
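{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick usage sketch (added for illustration) of `tokenize` followed by `reg_expressions` on a made-up string, under the assumed `maxtokens`/`maxtokenlen` defaults defined above. Note that fully non-alphabetic tokens collapse to empty strings here; they are filtered out in the next step."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative only: the sample text is made up, not taken from the corpus.\n",
"sample = \"Dear Friend, I am contacting you about $25,000,000 USD!\"\n",
"print(reg_expressions(tokenize(sample)))\n",
"# -> ['dear', 'friend', 'i', 'am', 'contacting', 'you', 'about', '', 'usd']\n",
"# ('$25,000,000' collapses to '' since every character is non-alphabetic)"
]
},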
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Stop-word removal"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"def stop_word_removal(row):\n",
" token = [token for token in row if token not in stopwords]\n",
" token = filter(None, token)\n",
" return token"
]
},
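{
"cell_type": "markdown",
"metadata": {},
"source": [
"Chaining all three helpers (added for illustration): `stop_word_removal` also drops the empty strings left behind by `reg_expressions`. This assumes the NLTK stopword list has already been downloaded and uses the made-up `sample` string from the sketch above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative only: full preprocessing chain on the toy string from above.\n",
"tokens = stop_word_removal(reg_expressions(tokenize(sample)))\n",
"print(tokens)\n",
"# -> ['dear', 'friend', 'contacting', 'usd']"
]
},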
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}