diff --git a/science_access/online_app_backend.py b/science_access/online_app_backend.py
index ea97ab2371ce3a0297a1314472ca656316e12773..0ee4114bdcdc1216df32072c1ebbf1e6fdfaf23a 100644
--- a/science_access/online_app_backend.py
+++ b/science_access/online_app_backend.py
@@ -162,26 +162,32 @@ def ar_manipulation(ar):
     trainingDats.extend(ar)
     return (ar, trainingDats)
 
+from crossref_commons.iteration import iterate_publications_as_json
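+# iterate_publications_as_json streams Crossref metadata records; the Heroku
+# path below uses it to look up publications by author name.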
 
-from duckduckpy import query
-#@st.cache(suppress_st_warning=True)
 def call_from_front_end(NAME):
     if not heroku:
         scholar_link=str('https://scholar.google.com/scholar?hl=en&as_sdt=0%2C3&q=')+str(NAME)
+        # Show the Google Scholar query URL that will be scraped.
+        st.text(scholar_link)
+
         _, _, ar  = enter_name_here(scholar_link,NAME)
 
 
     if heroku:
-        scholar_link=str('https://www.researchgate.net/search?q=')+str(NAME)#+str('&sort=relevance')
-        #query_string = str('!scholar ')+NAME
-        #response = query(query_string, secure=False, container=u'namedtuple', verbose=False,
-        #user_agent=u'duckduckpy 0.2', no_redirect=False, no_html=False,
-        #skip_disambig=False)
-        #scholar_link = response[-1]
-        sleep(np.random.uniform(1,3))
-        st.write(scholar_link)
-        _, _, ar  = enter_name_here(scholar_link,NAME)
-        st.write(ar)
+        # Query Crossref for journal articles authored by NAME.
+        filter_ = {'type': 'journal-article'}
+        queries = {'query.author': NAME}
+        ar = []
+        bi = [p for p in iterate_publications_as_json(max_results=130, filter=filter_, queries=queries)]
+        # Process only the first 15 records, via their Unpaywall landing pages.
+        for p in bi[0:15]:
+            temp = 'https://unpaywall.org/' + str(p['DOI'])
+            st.text(temp)
+            urlDat = process(temp)
+            if urlDat is not None:
+                ar.append(urlDat)
+                st.text(urlDat)
+
     (ar, trainingDats) = ar_manipulation(ar)
     '''
     with open('data/traingDats.p','rb') as f:            
diff --git a/try.py b/try.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c2da21a542ce5e82bf35259929d4f943a216817
--- /dev/null
+++ b/try.py
@@ -0,0 +1,10 @@
+import os
+
+from crossref_commons.iteration import iterate_publications_as_json
+
+# Fetch journal-article records authored by 'McGurrin' from Crossref, then
+# download the Unpaywall page for each record that carries an abstract.
+filter_ = {'type': 'journal-article'}
+queries = {'query.author': 'McGurrin'}
+for p in iterate_publications_as_json(max_results=189, filter=filter_, queries=queries):
+    if 'abstract' in p:
+        print(p.keys())
+        temp = 'wget https://unpaywall.org/' + str(p['DOI'])
+        print(temp)
+        os.system(temp)
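+        # A shell-free alternative (sketch, assuming the third-party `requests`
+        # package is installed) would fetch the page directly:
+        #   html = requests.get('https://unpaywall.org/' + str(p['DOI'])).text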