Skip to content
GitLab
Explore
Sign in
Register
Primary navigation
Search or go to…
Project
S
ScienceAccess
Manage
Activity
Members
Labels
Plan
Issues
9
Issue boards
Milestones
Wiki
Requirements
Code
Merge requests
0
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Locked files
Build
Pipelines
Jobs
Pipeline schedules
Test cases
Artifacts
Deploy
Releases
Package Registry
Container Registry
Model registry
Operate
Environments
Terraform modules
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Code review analytics
Issue analytics
Insights
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
Russell Jarvis
ScienceAccess
Commits
669c0f17
Unverified
Commit
669c0f17
authored
4 years ago
by
Russell Jarvis
Committed by
GitHub
4 years ago
Browse files
Options
Downloads
Patches
Plain Diff
Delete file_download_upload.py
parent
63035911
No related branches found
Branches containing commit
No related tags found
No related merge requests found
Pipeline
#1247
failed with stages
Stage:
Stage:
in 4 minutes and 4 seconds
Changes
1
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
file_download_upload.py
+0
-308
0 additions, 308 deletions
file_download_upload.py
with
0 additions
and
308 deletions
file_download_upload.py
deleted
100644 → 0
+
0
−
308
View file @
63035911
import
streamlit
as
st
import
os
# NLP Pkgs
from
textblob
import
TextBlob
import
spacy
from
gensim.summarization
import
summarize
# Sumy Pkg
from
sumy.parsers.plaintext
import
PlaintextParser
from
sumy.nlp.tokenizers
import
Tokenizer
from
sumy.summarizers.lex_rank
import
LexRankSummarizer
##
# code to acknowledge and integrate from awesome streamlit
##
# Sumy Summarization
def sumy_summarizer(docx):
    """Summarize *docx* down to three sentences with Sumy's LexRank.

    The text is parsed with the English tokenizer, ranked sentence-by-sentence
    by LexRank, and the top three sentences are joined into a single string.
    """
    document = PlaintextParser.from_string(docx, Tokenizer("english")).document
    ranked_sentences = LexRankSummarizer()(document, 3)
    return " ".join(str(sentence) for sentence in ranked_sentences)
# Function For Analysing Tokens and Lemma
@st.cache
def text_analyzer(my_text):
    """Tokenize *my_text* with spaCy and pair each token with its lemma.

    Returns a list of strings of the form '"Token":<text>,\\n"Lemma":<lemma>'.
    Cached by Streamlit so repeated analyses of the same text are free.
    """
    nlp = spacy.load("en")
    doc = nlp(my_text)
    records = []
    for token in doc:
        records.append('"Token":{},\n"Lemma":{}'.format(token.text, token.lemma_))
    return records
# Function For Extracting Entities
@st.cache
def entity_analyzer(my_text):
    """Run spaCy NER over *my_text* and report tokens plus named entities.

    Returns a single-element list containing one formatted string with the
    full token list and the (entity text, entity label) pairs.
    Cached by Streamlit so repeated analyses of the same text are free.
    """
    nlp = spacy.load("en")
    doc = nlp(my_text)
    token_texts = [tok.text for tok in doc]
    entity_pairs = [(ent.text, ent.label_) for ent in doc.ents]
    return ['"Token":{},\n"Entities":{}'.format(token_texts, entity_pairs)]
def main():
    """NLP Based App with Streamlit.

    Renders the NLPiffy UI: token/lemma analysis, named-entity extraction,
    TextBlob sentiment, and text summarization (sumy or gensim), plus a
    sidebar with app credits.
    """
    # Title
    st.title("NLPiffy with Streamlit")
    st.subheader("Natural Language Processing On the Go..")

    # Tokenization
    if st.checkbox("Show Tokens and Lemma"):
        st.subheader("Tokenize Your Text")
        message = st.text_area("Enter Text", "Type Here ..")
        if st.button("Analyze"):
            nlp_result = text_analyzer(message)
            st.json(nlp_result)

    # Entity Extraction
    if st.checkbox("Show Named Entities"):
        st.subheader("Analyze Your Text")
        message = st.text_area("Enter Text", "Type Here ..")
        if st.button("Extract"):
            entity_result = entity_analyzer(message)
            st.json(entity_result)

    # Sentiment Analysis
    if st.checkbox("Show Sentiment Analysis"):
        st.subheader("Analyse Your Text")
        message = st.text_area("Enter Text", "Type Here ..")
        if st.button("Analyze"):
            blob = TextBlob(message)
            result_sentiment = blob.sentiment
            st.success(result_sentiment)

    # Summarization
    if st.checkbox("Show Text Summarization"):
        st.subheader("Summarize Your Text")
        message = st.text_area("Enter Text", "Type Here ..")
        summary_options = st.selectbox("Choose Summarizer", ["sumy", "gensim"])
        if st.button("Summarize"):
            if summary_options == "sumy":
                st.text("Using Sumy Summarizer ..")
                summary_result = sumy_summarizer(message)
            elif summary_options == "gensim":
                st.text("Using Gensim Summarizer ..")
                # BUG FIX: the original called summarize(rawtext), but
                # `rawtext` is undefined here and raised NameError; the
                # user's input is in `message`.
                summary_result = summarize(message)
            else:
                # Unreachable in practice (the selectbox only offers
                # "sumy"/"gensim"), kept as a defensive default.
                st.warning("Using Default Summarizer")
                st.text("Using Gensim Summarizer ..")
                # Same BUG FIX as above: `rawtext` was undefined.
                summary_result = summarize(message)
            st.success(summary_result)

    # Sidebar credits
    st.sidebar.subheader("About App")
    st.sidebar.text("NLPiffy App with Streamlit")
    st.sidebar.info("Cudos to the Streamlit Team")
    st.sidebar.subheader("By")
    st.sidebar.text("Jesse E.Agbe(JCharis)")
    st.sidebar.text("Jesus saves@JCharisTech")


if __name__ == "__main__":
    main()
import
base64
import
os
import
json
import
pickle
import
uuid
import
re
import
streamlit
as
st
import
pandas
as
pd
def download_button(object_to_download, download_filename, button_text, pickle_it=False):
    """Generate an HTML snippet (styled <a> tag) to download *object_to_download*.

    Params:
    ------
    object_to_download: The object to be downloaded. bytes are used as-is,
        a pd.DataFrame is serialized with to_csv(index=False), and anything
        else is JSON-encoded.
    download_filename (str): filename and extension of file. e.g. mydata.csv,
        some_txt_output.txt
    button_text (str): Text to display on download button (e.g.
        'click here to download file')
    pickle_it (bool): If True, pickle the object instead of text-encoding it.

    Returns:
    -------
    (str): the styled anchor tag to download object_to_download, or None if
        pickling was requested and failed.

    Examples:
    --------
    download_link(your_df, 'YOUR_DF.csv', 'Click to download data!')
    download_link(your_str, 'YOUR_STRING.txt', 'Click to download text!')
    """
    if pickle_it:
        try:
            object_to_download = pickle.dumps(object_to_download)
        except pickle.PicklingError as e:
            # Surface the failure in the UI rather than crashing the app.
            st.write(e)
            return None
    else:
        if isinstance(object_to_download, bytes):
            pass
        elif isinstance(object_to_download, pd.DataFrame):
            object_to_download = object_to_download.to_csv(index=False)
        # Try JSON encode for everything else
        else:
            object_to_download = json.dumps(object_to_download)

    try:
        # some strings <-> bytes conversions necessary here
        b64 = base64.b64encode(object_to_download.encode()).decode()
    except AttributeError:
        # Already bytes (no .encode()); encode the raw payload directly.
        b64 = base64.b64encode(object_to_download).decode()

    # Random, digit-free id so the per-button CSS selector is a valid HTML id
    # and unique per render.
    button_uuid = str(uuid.uuid4()).replace("-", "")
    # FIX: raw string for the regex — "\d" in a plain literal is an invalid
    # escape sequence (SyntaxWarning on modern Python).
    button_id = re.sub(r"\d+", "", button_uuid)

    custom_css = f"""
        <style>
            #{button_id} {{
                background-color: rgb(255, 255, 255);
                color: rgb(38, 39, 48);
                padding: 0.25em 0.38em;
                position: relative;
                text-decoration: none;
                border-radius: 4px;
                border-width: 1px;
                border-style: solid;
                border-color: rgb(230, 234, 241);
                border-image: initial;
            }}
            #{button_id}:hover {{
                border-color: rgb(246, 51, 102);
                color: rgb(246, 51, 102);
            }}
            #{button_id}:active {{
                box-shadow: none;
                background-color: rgb(246, 51, 102);
                color: white;
            }}
        </style> """

    dl_link = (
        custom_css
        + f'<a download="{download_filename}" id="{button_id}" '
        f'href="data:file/txt;base64,{b64}">{button_text}</a><br></br>'
    )
    return dl_link
def file_selector(folder_path="."):
    """Render a selectbox of the files in *folder_path* and return the
    os.path.join of the folder and the chosen filename."""
    available = os.listdir(folder_path)
    chosen = st.selectbox("Select a file", available)
    return os.path.join(folder_path, chosen)
def download_pickle():
    """Streamlit demo UI for download_button.

    Offers two flows: (1) download a sample in-memory object (optionally
    pickled) of a user-selected dtype, and (2) pick an existing file on disk
    and download it. Each flow can also display an equivalent code example.
    """
    # ---------------------
    # Download from memory
    # ---------------------
    if st.checkbox("Download object from memory"):
        st.write("~> Use if you want to save some data from memory (e.g. pd.DataFrame, dict, list, str, int)")

        # Enter text for testing
        s = st.selectbox(
            "Select dtype",
            [
                "list",
                # TODO: Add more
                "str",
                "int",
                "float",
                "dict",
                "bool",
                "pd.DataFrame",
            ],
        )
        filename = st.text_input(
            "Enter output filename and ext (e.g. my-dataframe.csv, my-file.json, my-list.txt)",
            "my-file.json",
        )

        # Pickle Rick
        pickle_it = st.checkbox("Save as pickle file")

        sample_df = pd.DataFrame({"x": list(range(10)), "y": list(range(10))})
        # One representative sample value per selectable dtype.
        sample_dtypes = {
            "list": [1, "a", [2, "c"], {"b": 2}],
            "str": "Hello Streamlit!",
            "int": 17,
            "float": 17.0,
            "dict": {1: "a", "x": [2, "c"], 2: {"b": 2}},
            "bool": True,
            "pd.DataFrame": sample_df,
        }

        # Display sample data
        st.write(f"#### Sample `{s}` to be saved to `{filename}`")
        st.code(sample_dtypes[s], language="python")

        # Download sample
        download_button_str = download_button(
            sample_dtypes[s],
            filename,
            f"Click here to download {filename}",
            pickle_it=pickle_it,
        )
        st.markdown(download_button_str, unsafe_allow_html=True)

        if st.checkbox("Show code example"):
            code_text = f"""
                s = {sample_dtypes[s]}
                download_button_str = download_button(s, '{filename}', 'Click here to download {filename}', pickle_it={pickle_it})
                st.markdown(download_button_str, unsafe_allow_html=True)
            """
            st.code(code_text, language="python")

    # --------------------------
    # Select a file to download
    # --------------------------
    if st.checkbox("Select a file to download"):
        st.write("~> Use if you want to test uploading / downloading a certain file.")

        # Upload file for testing
        # FIX: corrected user-facing typo "deafult" -> "default".
        folder_path = st.text_input("Enter directory: default .", ".")
        filename = file_selector(folder_path=folder_path)

        # Load selected file
        with open(filename, "rb") as f:
            s = f.read()

        download_button_str = download_button(
            s, filename, f"Click here to download {filename}"
        )
        st.markdown(download_button_str, unsafe_allow_html=True)

        if st.checkbox("Show code example"):
            code_text = f"""
                with open('{filename}', 'rb') as f:
                    s = f.read()
                download_button_str = download_button(s, '{filename}', 'Click here to download {filename}')
                st.markdown(download_button_str, unsafe_allow_html=True)
            """
            st.code(code_text, language="python")
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment