Update app.py
app.py CHANGED

@@ -7,6 +7,56 @@ query_text = 'Query used for keyword search (you can also edit, and experiment w
 written_question = st.text_input(query_text, question)
 if written_question:
     question = written_question
+
+if st.button('Run semantic question answering'):
+    if question:
+        try:
+            url = f"{ES_URL}/document/_search?pretty"
+            # payload = json.dumps({"query":{"match":{"content":"moldova"}}})
+            payload = json.dumps({"query": {
+                "more_like_this": { "like": question, # "What is the capital city of Netherlands?"
+                    "fields": ["content"], "min_term_freq": 1.9, "min_doc_freq": 4, "max_query_terms": 50
+                }}})
+            headers = {'Content-Type': 'application/json'}
+            response = requests.request("GET", url, headers=headers, data=payload)
+            kws_result = response.json() # print(response.text)
+
+        except Exception as e:
+            qa_result = str(e)
+
+        top_5_hits = kws_result['hits']['hits'][:5] # print("First 5 results:")
+        top_5_text = [{'text': hit['_source']['content'][:500],
+                       'confidence': hit['_score']} for hit in top_5_hits ]
+        top_5_para = [hit['_source']['content'][:5000] for hit in top_5_hits]
+
+        DPR_MODEL = "deepset/roberta-base-squad2" #, model="distilbert-base-cased-distilled-squad"
+        pipe_exqa = pipeline("question-answering", model=DPR_MODEL)
+        qa_results = [pipe_exqa(question=question, context=paragraph) for paragraph in top_5_para]
+
+        for i, qa_result in enumerate(qa_results):
+            if "answer" in qa_result.keys():
+                answer_span, answer_score = qa_result["answer"], qa_result["score"]
+                st.write(f'Answer: **{answer_span}**')
+                paragraph = top_5_para[i]
+                start_par, stop_para = max(0, qa_result["start"]-86), min(qa_result["end"]+90, len(paragraph))
+                answer_context = paragraph[start_par:stop_para].replace(answer_span, f'**{answer_span}**')
+                st.write(f'Answer context (and score): ... _{answer_context}_ ...')
+                st.write(f'(answer confidence: {format(answer_score, ".3f")})')
+
+        st.write(f'Answers JSON: '); st.write(qa_results)
+
+        for i, doc_hit in enumerate(top_5_text):
+            st.subheader(f'Search result #{i+1} (and score):')
+            st.write(f'<em>{doc_hit["text"]}...</em>', unsafe_allow_html = True)
+            st.markdown(f'> (*confidence score*: **{format(doc_hit["confidence"], ".3f")}**)')
+
+        st.write(f'Search results JSON: '); st.write(top_5_text)
+    else:
+        st.write('Write a query to submit your keyword search'); st.stop()
+
+# question_similarity = [ (hit['_score'], hit['_source']['content'][:200])
+#                         for hit in result_first_two_hits ] # print(question_similarity)
+
 if st.button('Run keyword search'):
     if question:
         try:
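
Note on the retrieval step: if the Elasticsearch request in the try block fails, the except branch only stores the error message in qa_result, so kws_result stays undefined and the later read of kws_result['hits']['hits'] raises a NameError. Below is a minimal standalone sketch of this step, not part of the commit; it assumes the same index name (document), field (content) and an ES_URL base such as http://localhost:9200, and returns an empty hit list on failure instead. Elasticsearch documents min_term_freq as an integer, so the sketch uses 1 rather than 1.9.

import json
import requests

ES_URL = "http://localhost:9200"  # assumption: the same base URL app.py configures elsewhere

def retrieve_similar_docs(question, size=5):
    """Run a more_like_this query against the 'document' index and return up to `size` hits."""
    payload = json.dumps({
        "size": size,
        "query": {
            "more_like_this": {
                "like": question,
                "fields": ["content"],
                "min_term_freq": 1,   # integer, per the more_like_this query documentation
                "min_doc_freq": 1,
                "max_query_terms": 50,
            }
        },
    })
    try:
        response = requests.get(f"{ES_URL}/document/_search",
                                headers={"Content-Type": "application/json"}, data=payload)
        response.raise_for_status()
        return response.json()["hits"]["hits"]
    except (requests.RequestException, KeyError, ValueError) as exc:
        # Fail soft: report the problem and return no hits instead of leaving the result undefined.
        print(f"Search failed: {exc}")
        return []

# e.g. hits = retrieve_similar_docs("What is the capital city of Netherlands?")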
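
Note on the question-answering step: the diff builds the deepset/roberta-base-squad2 pipeline inside the button handler, so the model is reloaded on every click. A possible variant (a sketch, not part of this commit) caches the pipeline with Streamlit's st.cache_resource and returns each answer together with its source paragraph, best score first, so the start/end offsets can still be used to highlight the answer span the way the diff does.

import streamlit as st
from transformers import pipeline

@st.cache_resource
def load_qa_pipeline(model_name="deepset/roberta-base-squad2"):
    # Cached across Streamlit reruns, so the model is loaded once per process.
    return pipeline("question-answering", model=model_name)

def answer_from_paragraphs(question, paragraphs):
    """Run extractive QA over each paragraph and return (result, paragraph) pairs, best score first.

    Each result dict carries 'answer', 'score', 'start' and 'end'.
    """
    pipe_exqa = load_qa_pipeline()
    scored = [(pipe_exqa(question=question, context=p), p) for p in paragraphs if p]
    return sorted(scored, key=lambda pair: pair[0]["score"], reverse=True)

# e.g., inside the button handler:
#     for result, paragraph in answer_from_paragraphs(question, top_5_para):
#         st.write(f'Answer: **{result["answer"]}**')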