diff --git a/promptsource/templates/GEM/wiki_lingua/en/templates.yaml b/promptsource/templates/GEM/wiki_lingua/en/templates.yaml index e64a29a80..80a12df83 100644 --- a/promptsource/templates/GEM/wiki_lingua/en/templates.yaml +++ b/promptsource/templates/GEM/wiki_lingua/en/templates.yaml @@ -82,3 +82,30 @@ templates: original_task: true name: write_abstract_en reference: xsum 'read_below_DOC_write_abstract' template + dff7b414-7385-4855-bb90-253073a34fde: !Template + answer_choices: null + id: dff7b414-7385-4855-bb90-253073a34fde + jinja: "{{target}}\n\nGiven the above abstract, write an English article for it. ||| {{source}}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - ROUGE + - BLEU + original_task: true + name: xp3longwritearticle + reference: '' + dff7b415-7385-4855-bb90-253073a34fde: !Template + answer_choices: null + id: dff7b415-7385-4855-bb90-253073a34fde + jinja: "{{target}}\n\nI'm interested in that, but I only have a few mins. + Can you give me the first 500 characters of an article about that? 
||| {{source[:500]}}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - ROUGE + - BLEU + original_task: true + name: xp3longchars + reference: '' diff --git a/promptsource/templates/GEM/wiki_lingua/en_en/templates.yaml b/promptsource/templates/GEM/wiki_lingua/en_en/templates.yaml new file mode 100644 index 000000000..b6cd8b553 --- /dev/null +++ b/promptsource/templates/GEM/wiki_lingua/en_en/templates.yaml @@ -0,0 +1,111 @@ +dataset: GEM/wiki_lingua +subset: en_en +templates: + 088288f3-7516-4cf7-9406-0e082053bf54: !Template + answer_choices: null + id: 088288f3-7516-4cf7-9406-0e082053bf54 + jinja: '{{source}} + + + === + + Write a summary of the previous text in {{target_language_name}}: ||| {{target}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - ROUGE + - BLEU + original_task: true + name: summarize_above_en + reference: xsum DOC_write_summary_of_above template + 2038df7b-5420-4a33-87ec-09715419deef: !Template + answer_choices: null + id: 2038df7b-5420-4a33-87ec-09715419deef + jinja: 'Source in {{source_language_name}}: {{source}} + + + Summary in {{target_language_name}}: ||| {{target}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - ROUGE + - BLEU + original_task: true + name: article_summary_en + reference: xsum 'article_DOC_summary' template + 753f0a46-aeff-4cd2-932c-8548897cebe5: !Template + answer_choices: null + id: 753f0a46-aeff-4cd2-932c-8548897cebe5 + jinja: '{{source}} + + + How would you rephrase that briefly using {{target_language_name}}? 
||| {{target}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - ROUGE + - BLEU + original_task: true + name: rephrase_en + reference: xsum 'DOC_how_would_you_rephrase_few_words' template + d3c5baa3-5e37-46f8-b1b2-5b834181c9da: !Template + answer_choices: null + id: d3c5baa3-5e37-46f8-b1b2-5b834181c9da + jinja: '{{source}} + + + TL;DR in {{target_language_name}}: ||| {{target}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - ROUGE + - BLEU + original_task: true + name: tldr_en + reference: xsum 'DOC_tldr' template + dff7b314-7385-4855-bb90-253073a34fde: !Template + answer_choices: null + id: dff7b314-7385-4855-bb90-253073a34fde + jinja: "First, read the {{source_language_name}} text below.\n\n{{source}} \n\nNow, please write\ + \ a short abstract for it in {{target_language_name}}. Abstract: ||| {{target}}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - ROUGE + - BLEU + original_task: true + name: write_abstract_en + reference: xsum 'read_below_DOC_write_abstract' template + dfa7b514-7385-4855-bb90-253073a34fde: !Template + answer_choices: null + id: dfa7b514-7385-4855-bb90-253073a34fde + jinja: "{{target}}\n\nGiven the above summary, write a detailed text in {{source_language_name}} for it. ||| {{source}}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - ROUGE + - BLEU + original_task: true + name: xp3longwritearticle + reference: '' + dff8b414-7485-4855-bb90-253073a34fde: !Template + answer_choices: null + id: dff8b414-7485-4855-bb90-253073a34fde + jinja: "{{target}}\n\nI'm interested in that, but I only have a few mins. + Can you give me at most the first 500 characters of a detailed explanation + in {{source_language_name}} about that? 
||| {{source[:500]}}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - ROUGE + - BLEU + original_task: true + name: xp3longchars + reference: '' diff --git a/promptsource/templates/GEM/xlsum/english/templates.yaml b/promptsource/templates/GEM/xlsum/english/templates.yaml new file mode 100644 index 000000000..15b95969d --- /dev/null +++ b/promptsource/templates/GEM/xlsum/english/templates.yaml @@ -0,0 +1,54 @@ + dc0096ea-e9db-4e96-85b4-0740085fee55: !Template + answer_choices: null + id: dc0096ea-e9db-4e96-85b4-0740085fee55 + jinja: 'Given the below title and summary of an article, generate a short article or the beginning of a long article to go along with them. + Title: {{title}}\nSummary: {{target}}\nArticle (Max 500 characters): ||| {{text[:500]}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - ROUGE + - BLEU + original_task: true + name: xp3longgenarticle + reference: '' + hd0097ea-e9db-4e96-85b4-0740085fee55: !Template + answer_choices: null + id: hd0097ea-e9db-4e96-85b4-0740085fee55 + jinja: 'Title: {{title}}\nGiven the above title of an imaginary article, imagine the article.\n ||| {{text[:7000]}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - ROUGE + - BLEU + original_task: true + name: xp3longimaginearticle + reference: '' + hc0099ea-e9db-4e96-85b4-0740085fee55: !Template + answer_choices: null + id: hc0099ea-e9db-4e96-85b4-0740085fee55 + jinja: '{{text[:1000]}}... Continue the article for another 4000 characters max: ||| {{text[1000:5000]}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - ROUGE + - BLEU + original_task: true + name: xp3longcontinue + reference: '' + hc0196ea-e9db-4e96-85b4-0740085fee55: !Template + answer_choices: null + id: hc0196ea-e9db-4e96-85b4-0740085fee55 + jinja: '...{{text[3000:3500]}}... 
Write the rest of the article: ||| + {{text[5000:]}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - ROUGE + - BLEU + original_task: true + name: xp3longrest + reference: '' diff --git a/promptsource/templates/adversarial_qa/dbert/templates.yaml b/promptsource/templates/adversarial_qa/dbert/templates.yaml index 6a2a33629..e3d9d60e1 100644 --- a/promptsource/templates/adversarial_qa/dbert/templates.yaml +++ b/promptsource/templates/adversarial_qa/dbert/templates.yaml @@ -118,3 +118,35 @@ templates: original_task: true name: answer_the_following_q reference: 'Input: QC, Output: Answer' + b64d5a15-68e2-4d1c-b30a-ca8250c860fa: !Template + answer_choices: null + id: b64d5a15-68e2-4d1c-b30a-ca8250c860fa + jinja: '{{question}} Given the previous question, write a context that contains the answer. It can be 1 - 20 sentences. Context: + ||| + {{context}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Squad + original_task: true + name: xp3longwritecontext + reference: '' + c64d5a15-68e2-4d1c-b30a-ca8250c860fa: !Template + answer_choices: null + id: c64d5a15-68e2-4d1c-b30a-ca8250c860fa + jinja: '{% if metadata.split != "test" %} + Generate a few sentences of context that can be used to answer the question {{question}}. + The answer is "{{answers.text | choice}}" and should appear in the context. + Generate after this sentence. 
||| {{context}} + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Squad + original_task: true + name: xp3longgeneratecontext + reference: '' diff --git a/promptsource/templates/adversarial_qa/dbidaf/templates.yaml b/promptsource/templates/adversarial_qa/dbidaf/templates.yaml index 047946f71..529639b19 100644 --- a/promptsource/templates/adversarial_qa/dbidaf/templates.yaml +++ b/promptsource/templates/adversarial_qa/dbidaf/templates.yaml @@ -118,3 +118,35 @@ templates: original_task: true name: question_context_answer reference: 'Input: QC, Output: Answer (short form)' + e84d5a15-68e2-4d1c-b30a-ca8250c860fa: !Template + answer_choices: null + id: e84d5a15-68e2-4d1c-b30a-ca8250c860fa + jinja: '{{question}} Given the previous question, write a context that contains the answer. It can be 1 - 20 sentences. Context: + ||| + {{context}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Squad + original_task: true + name: xp3longwritecontext + reference: '' + e65d5a15-68e2-4d1c-b30a-ca8250c860fa: !Template + answer_choices: null + id: e65d5a15-68e2-4d1c-b30a-ca8250c860fa + jinja: '{% if metadata.split != "test" %} + Generate a few sentences of context that can be used to answer the question {{question}}. + The answer is "{{answers.text | choice}}" and should appear in the context. + Generate after this sentence. 
||| {{context}} + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Squad + original_task: true + name: xp3longgeneratecontext + reference: '' diff --git a/promptsource/templates/adversarial_qa/droberta/templates.yaml b/promptsource/templates/adversarial_qa/droberta/templates.yaml index ef3a2ef6f..0ebfe228f 100644 --- a/promptsource/templates/adversarial_qa/droberta/templates.yaml +++ b/promptsource/templates/adversarial_qa/droberta/templates.yaml @@ -118,3 +118,35 @@ templates: original_task: true name: answer_the_following_q reference: 'Input: QC, Output: Answer' + e86d5a15-68e2-4d1c-b30a-ca8250c860fa: !Template + answer_choices: null + id: e86d5a15-68e2-4d1c-b30a-ca8250c860fa + jinja: '{{question}} Given the previous question, write a context that contains the answer. It can be 1 - 20 sentences. Context: + ||| + {{context}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Squad + original_task: true + name: xp3longwritecontext + reference: '' + e66d5a15-68e2-4d1c-b30a-ca8250c860fa: !Template + answer_choices: null + id: e66d5a15-68e2-4d1c-b30a-ca8250c860fa + jinja: '{% if metadata.split != "test" %} + Generate a few sentences of context that can be used to answer the question {{question}}. + The answer is "{{answers.text | choice}}" and should appear in the context. + Generate after this sentence. 
||| {{context}} + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Squad + original_task: true + name: xp3longgeneratecontext + reference: '' diff --git a/promptsource/templates/ag_news/templates.yaml b/promptsource/templates/ag_news/templates.yaml index cdfdf6acb..49029ca2d 100644 --- a/promptsource/templates/ag_news/templates.yaml +++ b/promptsource/templates/ag_news/templates.yaml @@ -106,3 +106,30 @@ templates: original_task: true name: classify reference: '' + cc355f33-7e8c-4455-a72b-48d315bd4f60: !Template + answer_choices: World politics ||| Sports ||| Business ||| Science and technology + id: cc355f33-7e8c-4455-a72b-48d315bd4f60 + jinja: "Generate a news article on the topic of {{answer_choices[label]}}. Article: ||| {{text}}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longgenerate + reference: '' + cb355f34-7e8c-4455-a72b-48d315bd4f60: !Template + answer_choices: Politician ||| Athlete ||| Business executive ||| Scientist + id: cb355f34-7e8c-4455-a72b-48d315bd4f60 + jinja: "Imagine talking to a {{answer_choices[label]}}. + Imagine a news article that would interest them: ||| {{text}}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longimagine + reference: '' diff --git a/promptsource/templates/amazon_polarity/templates.yaml b/promptsource/templates/amazon_polarity/templates.yaml index 4558071be..6a0754b80 100644 --- a/promptsource/templates/amazon_polarity/templates.yaml +++ b/promptsource/templates/amazon_polarity/templates.yaml @@ -190,3 +190,30 @@ templates: original_task: true name: flattering_or_not reference: '' + b23369e8-0500-4e93-90d4-8e6814bfb99b: !Template + answer_choices: negative ||| positive + id: b23369e8-0500-4e93-90d4-8e6814bfb99b + jinja: 'Write a {{answer_choices[label]}} review with the title "{{title}}". 
+ Review: ||| {{content}}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longwritereview + reference: '' + b25369e8-0500-4e93-90d4-8e6814bfb99b: !Template + answer_choices: negative ||| positive + id: b25369e8-0500-4e93-90d4-8e6814bfb99b + jinja: 'Generate an imaginary product review titled: {{title}}. Review: ||| {{content}}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longimaginereview + reference: '' diff --git a/promptsource/templates/clue/cmrc2018/templates.yaml b/promptsource/templates/clue/cmrc2018/templates.yaml new file mode 100644 index 000000000..a93111f8c --- /dev/null +++ b/promptsource/templates/clue/cmrc2018/templates.yaml @@ -0,0 +1,29 @@ + 9fc15385-814e-419a-b862-2d4e06a58ef6: !Template + answer_choices: null + id: 9fc15385-814e-419a-b862-2d4e06a58ef6 + jinja: 'Q: {{ question }} + Can you write some context to answer the question? ||| {{ context }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - Squad + original_task: true + name: xp3longctxt + reference: '' + 9fc25385-814e-419a-b862-2d4e06a58ef6: !Template + answer_choices: null + id: 9fc25385-814e-419a-b862-2d4e06a58ef6 + jinja: '{{ context[:answers["answer_start"][0]-5] }}... How would you + continue the prior text to answer "{{ question }}"? 
+ ||| {{ context[answers["answer_start"][0]-5:] }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - Squad + original_task: true + name: xp3longcontinue + reference: '' diff --git a/promptsource/templates/clue/csl/templates.yaml b/promptsource/templates/clue/csl/templates.yaml new file mode 100644 index 000000000..7915291ef --- /dev/null +++ b/promptsource/templates/clue/csl/templates.yaml @@ -0,0 +1,17 @@ + aff47f6f-fd8f-4180-8d85-e4c7df088ac6: !Template + answer_choices: no ||| yes + id: aff47f6f-fd8f-4180-8d85-e4c7df088ac6 + jinja: 'Write an abstract about "{{ keyword | join('', '') }}": + ||| + {% if label == 1 %} + {{abst}} + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - zh + metrics: + - Accuracy + original_task: true + name: xp3longabst + reference: '' diff --git a/promptsource/templates/clue/drcd/templates.yaml b/promptsource/templates/clue/drcd/templates.yaml new file mode 100644 index 000000000..d62326fe4 --- /dev/null +++ b/promptsource/templates/clue/drcd/templates.yaml @@ -0,0 +1,30 @@ + 9fc35386-814e-419a-b862-2d4e06a58ef6: !Template + answer_choices: null + id: 9fc35386-814e-419a-b862-2d4e06a58ef6 + jinja: '{{context[:answers["answer_start"][0]-5]}}... How would you + continue the prior text to answer "{{ question }}"? + ||| {{context[answers["answer_start"][0]-5:]}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - Squad + original_task: true + name: xp3longcontinue + reference: '' + b4684f23-b191-4e6d-9dc5-12b1d7d4cf49: !Template + answer_choices: null + id: b4684f23-b191-4e6d-9dc5-12b1d7d4cf49 + jinja: "In an exam, you are asked {{ question }}, and you are tasked to find a passage + answering the question. 
Write such a passage: ||| + {{ context }}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - zh + metrics: + - Squad + original_task: true + name: xp3longpassage + reference: '' diff --git a/promptsource/templates/cosmos_qa/templates.yaml b/promptsource/templates/cosmos_qa/templates.yaml index 5fe9c1b39..280b21c8c 100644 --- a/promptsource/templates/cosmos_qa/templates.yaml +++ b/promptsource/templates/cosmos_qa/templates.yaml @@ -286,3 +286,16 @@ templates: original_task: false name: only_question_answer reference: Template with only question and generates the answer + f640e365-091c-491e-a87e-f529514607e5: !Template + answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}' + id: f640e365-091c-491e-a87e-f529514607e5 + jinja: "<<{{question}}>> What is the context for this question? \n|||\n{{context}}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Accuracy + original_task: false + name: xp3longcontext + reference: '' diff --git a/promptsource/templates/dbpedia_14/templates.yaml b/promptsource/templates/dbpedia_14/templates.yaml index 2022b54dd..9cd71c5a9 100644 --- a/promptsource/templates/dbpedia_14/templates.yaml +++ b/promptsource/templates/dbpedia_14/templates.yaml @@ -78,3 +78,19 @@ templates: original_task: true name: given_a_list_of_category_what_does_the_title_belong_to reference: '' + f72fb410-3278-4f62-91f0-f9edf4a4e792: !Template + answer_choices: Company ||| Educational Institution ||| Artist ||| Athlete ||| + Office Holder ||| Mean Of Transportation ||| Building ||| Natural Place ||| + Village ||| Animal ||| Plant ||| Album ||| Film ||| Written Work + id: f72fb410-3278-4f62-91f0-f9edf4a4e792 + jinja: '"{{title}}". Write 1 - 5 sentences on the previous title. 
+ ||| {{content}}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longwritetext + reference: '' diff --git a/promptsource/templates/duorc/ParaphraseRC/templates.yaml b/promptsource/templates/duorc/ParaphraseRC/templates.yaml index 32e8f6a0a..5cfb6c6e9 100644 --- a/promptsource/templates/duorc/ParaphraseRC/templates.yaml +++ b/promptsource/templates/duorc/ParaphraseRC/templates.yaml @@ -239,3 +239,48 @@ templates: name: generate_question_by_answer reference: Given the passage and the answer, generate a question which has that answer. + d786ac96-de6b-403a-8628-5adb23252194: !Template + answer_choices: null + id: d786ac96-de6b-403a-8628-5adb23252194 + jinja: 'Here is a title for a movie plot: {{title}}. What could the plot be? + ||| {{plot}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - BLEU + - ROUGE + original_task: false + name: xp3longtitleplot + reference: '' + f586ac96-de6b-403a-8628-5adb23252194: !Template + answer_choices: null + id: f586ac96-de6b-403a-8628-5adb23252194 + jinja: 'Given the title <<{{title}}>> and the question <<{{question}}>>, write a movie story: + ||| {{plot}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - BLEU + - ROUGE + original_task: false + name: xp3longwritestory + reference: '' + f487bc96-de6b-403a-8628-5adb23252194: !Template + answer_choices: null + id: f487bc96-de6b-403a-8628-5adb23252194 + jinja: 'Finish the below start of a movie plot!\n{{plot[:10]}}: + ||| {{plot[10:]}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - BLEU + - ROUGE + original_task: false + name: xp3longfinishplot + reference: '' diff --git a/promptsource/templates/duorc/SelfRC/templates.yaml b/promptsource/templates/duorc/SelfRC/templates.yaml index 6a7a94929..77aa741bc 100644 --- a/promptsource/templates/duorc/SelfRC/templates.yaml +++ 
b/promptsource/templates/duorc/SelfRC/templates.yaml @@ -239,3 +239,48 @@ templates: original_task: true name: decide_worth_it reference: '' + d586ac96-de6b-403a-8628-5adb23252194: !Template + answer_choices: null + id: d586ac96-de6b-403a-8628-5adb23252194 + jinja: 'Here is a title for a movie plot: {{title}}. What could the plot be? + ||| {{plot}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - BLEU + - ROUGE + original_task: false + name: xp3longtitleplot + reference: '' + f486ac96-de6b-403a-8628-5adb23252194: !Template + answer_choices: null + id: f486ac96-de6b-403a-8628-5adb23252194 + jinja: 'Given the title <<{{title}}>> and the question <<{{question}}>>, write a movie story: + ||| {{plot}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - BLEU + - ROUGE + original_task: false + name: xp3longwritestory + reference: '' + f487ac96-de6b-403a-8628-5adb23252194: !Template + answer_choices: null + id: f487ac96-de6b-403a-8628-5adb23252194 + jinja: 'Finish the below start of a movie plot!\n{{plot[:10]}}: + ||| {{plot[10:]}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - BLEU + - ROUGE + original_task: false + name: xp3longfinishplot + reference: '' diff --git a/promptsource/templates/imdb/templates.yaml b/promptsource/templates/imdb/templates.yaml index 92d2c93a4..e63966af8 100644 --- a/promptsource/templates/imdb/templates.yaml +++ b/promptsource/templates/imdb/templates.yaml @@ -153,3 +153,17 @@ templates: original_task: true name: Reviewer Enjoyment reference: '' + e11970ab-42c0-4e6e-a08f-4940d889ef37: !Template + answer_choices: frustrating ||| positive + id: e11970ab-42c0-4e6e-a08f-4940d889ef37 + jinja: 'Write a movie review about your {{answer_choices[label]}} + experience with a movie. 
||| {{text}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longreview + reference: '' diff --git a/promptsource/templates/khalidalt/tyidiqa-goldp/english/templates.yaml b/promptsource/templates/khalidalt/tyidiqa-goldp/english/templates.yaml new file mode 100644 index 000000000..af983fea9 --- /dev/null +++ b/promptsource/templates/khalidalt/tyidiqa-goldp/english/templates.yaml @@ -0,0 +1,31 @@ + fcf0920f-6599-44a6-bf2a-9ef6bbbe1e64: !Template + answer_choices: null + id: fcf0920f-6599-44a6-bf2a-9ef6bbbe1e64 + jinja: 'Rumor has it that {{answers.text | choice}} is the answer to {{question_text}} + + I am skeptical... Can you prove the above by writing a Wikipedia-style article of + at most 8000 characters in {{language}}? ||| {{passage_text[:8000]}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Squad + original_task: true + name: xp3longwiki + reference: '' + fcf0921f-6599-45a6-bf2a-9ef6bbbe1e64: !Template + answer_choices: null + id: fcf0921f-6599-45a6-bf2a-9ef6bbbe1e64 + jinja: 'I want to teach my class about the topic of {{document_title}}. + Can you write me an article that I can give them to read as a homework? + It should answer {{question_text}}, be in {{language}} and be formal. Please. 
||| {{passage_text}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Squad + original_task: true + name: xp3longarticle + reference: '' diff --git a/promptsource/templates/khalidalt/tyidiqa-primary/english/templates.yaml b/promptsource/templates/khalidalt/tyidiqa-primary/english/templates.yaml new file mode 100644 index 000000000..8b87708a1 --- /dev/null +++ b/promptsource/templates/khalidalt/tyidiqa-primary/english/templates.yaml @@ -0,0 +1,29 @@ + 9d42e3fd-d46e-4149-bb60-4b3118104d95: !Template + answer_choices: Yes ||| No + id: 9d42e3fd-d46e-4149-bb60-4b3118104d95 + jinja: "I wonder {{question_text}}\n\nCan you get me some context to answer this + (in the same language as my question)? ||| {{document_plaintext[:10000]}}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longcontext + reference: '' + 9d43e3fd-d46e-4149-bb60-4b3118104d95: !Template + answer_choices: Yes ||| No + id: 9d43e3fd-d46e-4149-bb60-4b3118104d95 + jinja: "Do you remember when we were reading {{document_url}} the other day? + It started sth like {{document_plaintext[:100]}}.. Can you continue it for up to another 1000 characters? + ||| {{document_plaintext[100:1100]}}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longcontinue + reference: '' diff --git a/promptsource/templates/mlqa/mlqa.en.en/templates.yaml b/promptsource/templates/mlqa/mlqa.en.en/templates.yaml new file mode 100644 index 000000000..8c37484ff --- /dev/null +++ b/promptsource/templates/mlqa/mlqa.en.en/templates.yaml @@ -0,0 +1,30 @@ + 421fffe1-b752-43f8-bf50-ecf009703ef0: !Template + answer_choices: null + id: 421fffe1-b752-43f8-bf50-ecf009703ef0 + jinja: '{{ context[:answers.answer_start[0]-5]}}... 
+ + Continue the above, such that it answers "{{question}}": + ||| {{ context[answers.answer_start[0]-5:]}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - Squad + original_task: true + name: xp3longcontinue + reference: '' + 420fffe1-c752-43f8-bf50-ecf009703ef0: !Template + answer_choices: null + id: 420fffe1-c752-43f8-bf50-ecf009703ef0 + jinja: 'I found a text that answers "{{question}}" with {{answers.text[0]}}. + It starts with "{{ context[:10] }}". Can you continue it? + + ||| {{ context[10:] }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - Squad + original_task: true + name: xp3longanswers + reference: '' diff --git a/promptsource/templates/multi_news/templates.yaml b/promptsource/templates/multi_news/templates.yaml index 2cea0f984..f68274fa1 100644 --- a/promptsource/templates/multi_news/templates.yaml +++ b/promptsource/templates/multi_news/templates.yaml @@ -187,3 +187,49 @@ templates: original_task: true name: distill reference: '' + bc921e51-c0a9-473c-aa85-adcab21b9ba9: !Template + answer_choices: null + id: bc921e51-c0a9-473c-aa85-adcab21b9ba9 + jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto", + "") | list%} + + {% if document != "" %} + + "{{summary[2:]}}" <- This could be a summary of what article? + + ||| + + {{docs | choice}} + + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - BLEU + - ROUGE + original_task: false + name: xp3longwhatarticle + reference: '' + bc922e52-c0a9-473c-aa85-adcab21b9ba9: !Template + answer_choices: null + id: bc922e52-c0a9-473c-aa85-adcab21b9ba9 + jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto", + "") | list%} + {% if document != "" %} + After reading the summary "{{summary[2:]}}", I became interested. + Can you give me the full article? 
+ ||| + {{docs | choice}} + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - BLEU + - ROUGE + original_task: false + name: xp3longfullarticle + reference: '' diff --git a/promptsource/templates/quail/templates.yaml b/promptsource/templates/quail/templates.yaml index 79aef8c57..54f98209b 100644 --- a/promptsource/templates/quail/templates.yaml +++ b/promptsource/templates/quail/templates.yaml @@ -333,3 +333,34 @@ templates: original_task: true name: description_context_question_answer_text reference: '' + f474c2ca-952a-47ab-8420-cb5fb2c693d9: !Template + answer_choices: '{{answers | join("|||")}}' + id: f474c2ca-952a-47ab-8420-cb5fb2c693d9 + jinja: 'Given the title <<{{metadata["title"]}}>>, write a 300-350 token long passage. + ||| + + {{ answer_choices[correct_answer_id] }}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longtokenpassage + reference: '' + f444c2ca-952a-47ab-8420-cb5fb2c693d9: !Template + answer_choices: '{{answers | join("|||")}}' + id: f444c2ca-952a-47ab-8420-cb5fb2c693d9 + jinja: 'Write a story with reference to the question "{{question}}". + The name of the story should be {{metadata["title"]}}. 
+ Story: ||| {{ answer_choices[correct_answer_id] }}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longstory + reference: '' diff --git a/promptsource/templates/quoref/templates.yaml b/promptsource/templates/quoref/templates.yaml index 44586d480..5814a0209 100644 --- a/promptsource/templates/quoref/templates.yaml +++ b/promptsource/templates/quoref/templates.yaml @@ -220,3 +220,44 @@ templates: original_task: true name: 'Read And Extract ' reference: '' + fcbe1609-06ce-4cbd-91de-adc38966bcac: !Template + answer_choices: null + id: fcbe1609-06ce-4cbd-91de-adc38966bcac + jinja: 'I was wondering: {{question}} + Write an article about this answering my question. ||| {{context}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Squad + original_task: true + name: xp3longwritearticle + reference: '' + fcbe0709-06ce-4cbd-91de-adc38966bcac: !Template + answer_choices: null + id: fcbe0709-06ce-4cbd-91de-adc38966bcac + jinja: 'Generate a passage from {{url}}. ||| {{context}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Squad + original_task: true + name: xp3longurlpassage + reference: '' + fabe0709-06ce-4cbd-91de-adc38966bcac: !Template + answer_choices: null + id: fabe0709-06ce-4cbd-91de-adc38966bcac + jinja: 'I asked my friend: "{{question}}"...She said "{{answers.text | choice}}". + I am not convinced. Can you give me an article that proves her correct? 
||| {{context}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Squad + original_task: true + name: xp3longprove + reference: '' diff --git a/promptsource/templates/race/high/templates.yaml b/promptsource/templates/race/high/templates.yaml index 28c73e13a..2d708d073 100644 --- a/promptsource/templates/race/high/templates.yaml +++ b/promptsource/templates/race/high/templates.yaml @@ -200,3 +200,34 @@ templates: original_task: true name: Read the article and answer the question (no option) reference: '' + e2b9d073-e18e-4940-9868-5b4a35617c35: !Template + answer_choices: '{{ options | join("|||") }}' + id: e2b9d073-e18e-4940-9868-5b4a35617c35 + jinja: ' + + Options: + + {{"A)"}} {{options.0}} + + {{"B)"}} {{options.1}} + + {{"C)"}} {{options.2}} + + {{"D)"}} {{options.3}} + + {{question}} {{answer}} is correct. + + Write a passage that would answer the question with the answer above. + + ||| + + {{article}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longwritepassage + reference: '' diff --git a/promptsource/templates/race/middle/templates.yaml b/promptsource/templates/race/middle/templates.yaml index 311f506ba..a53df7e80 100644 --- a/promptsource/templates/race/middle/templates.yaml +++ b/promptsource/templates/race/middle/templates.yaml @@ -200,3 +200,34 @@ templates: original_task: true name: Taking a test reference: '' + e2c9d073-e18e-4940-9868-5b4a35617c35: !Template + answer_choices: '{{ options | join("|||") }}' + id: e2c9d073-e18e-4940-9868-5b4a35617c35 + jinja: ' + + Options: + + {{"A)"}} {{options.0}} + + {{"B)"}} {{options.1}} + + {{"C)"}} {{options.2}} + + {{"D)"}} {{options.3}} + + {{question}} {{answer}} is correct. + + Write a passage that would answer the question with the answer above. 
+ + ||| + + {{article}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longwritepassage + reference: '' diff --git a/promptsource/templates/ropes/templates.yaml b/promptsource/templates/ropes/templates.yaml index 93e82240b..06b283196 100644 --- a/promptsource/templates/ropes/templates.yaml +++ b/promptsource/templates/ropes/templates.yaml @@ -278,3 +278,30 @@ templates: original_task: true name: read_background_situation reference: '' + f72e0adb-ca74-4280-8ed3-8b53411d87ce: !Template + answer_choices: null + id: f72e0adb-ca74-4280-8ed3-8b53411d87ce + jinja: '{{situation}}\n\n{{question}} I need some useful background to understand the question. + Can you provide me some? ||| {{background}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Squad + original_task: true + name: xp3longneedbackground + reference: '' + f73e0adb-ca74-4280-8ed3-8b53411d87ce: !Template + answer_choices: null + id: f73e0adb-ca74-4280-8ed3-8b53411d87ce + jinja: 'In what kind of a situation would you ask this question: "{{question}}"? ||| {{situation}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Squad + original_task: true + name: xp3longwhatsituation + reference: '' diff --git a/promptsource/templates/rotten_tomatoes/templates.yaml b/promptsource/templates/rotten_tomatoes/templates.yaml index cb5fdd5a3..37001bb8c 100644 --- a/promptsource/templates/rotten_tomatoes/templates.yaml +++ b/promptsource/templates/rotten_tomatoes/templates.yaml @@ -140,3 +140,17 @@ templates: original_task: true name: Reviewer Sentiment Feeling reference: '' + e31970ab-42c0-4e6e-a08f-4940d889ef37: !Template + answer_choices: negative ||| amazing + id: e31970ab-42c0-4e6e-a08f-4940d889ef37 + jinja: 'Write a movie review about your {{answer_choices[label]}} + experience with a movie. 
||| {{text}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longreview + reference: '' diff --git a/promptsource/templates/sciq/templates.yaml b/promptsource/templates/sciq/templates.yaml index 7b3825017..bec293491 100644 --- a/promptsource/templates/sciq/templates.yaml +++ b/promptsource/templates/sciq/templates.yaml @@ -98,3 +98,30 @@ templates: original_task: true name: Direct Question reference: '' + d427fcfb-9f00-4186-95d8-e63609495164: !Template + answer_choices: '{{distractor1}} ||| {{distractor2}} ||| {{distractor3}} ||| {{correct_answer}}' + id: d427fcfb-9f00-4186-95d8-e63609495164 + jinja: "{{question}} {{answer_choices[3]}}. Write a short text to support this claim. ||| {{support}} " + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longsupportclaim + reference: '' + d418fcfb-9f00-4186-95d8-e63609495164: !Template + answer_choices: '{{distractor1}} ||| {{distractor2}} ||| {{distractor3}} ||| {{correct_answer}}' + id: d418fcfb-9f00-4186-95d8-e63609495164 + jinja: "{{question}}\n\nI know that the answer is not {{distractor1}}, {{distractor2}} or {{distractor3}}. + But what is the correct answer? Can you write a few sentences to explain it? 
||| {{support}} " + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longexplain + reference: '' diff --git a/promptsource/templates/squad_v2/templates.yaml b/promptsource/templates/squad_v2/templates.yaml index 814be0b6f..77ee67c68 100644 --- a/promptsource/templates/squad_v2/templates.yaml +++ b/promptsource/templates/squad_v2/templates.yaml @@ -385,3 +385,36 @@ templates: original_task: false name: Topic Prediction - Context reference: Predict the topic from the passage + fddf132e-6c70-4188-999e-93601ee8e089: !Template + answer_choices: null + id: fddf132e-6c70-4188-999e-93601ee8e089 + jinja: '{{title | replace("_", " ")}} + + Passage about the Title: ||| {{context}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - BLEU + - ROUGE + - Other + original_task: false + name: xp3longgenpassage + reference: '' + fdcf142e-6c70-4188-999e-93601ee8e089: !Template + answer_choices: null + id: fdcf142e-6c70-4188-999e-93601ee8e089 + jinja: 'Given the question "{{question}}", please generate an + article that contains the answer to the question. ||| {{context}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - BLEU + - ROUGE + - Other + original_task: false + name: xp3longgenarticle + reference: '' diff --git a/promptsource/templates/super_glue/boolq/templates.yaml b/promptsource/templates/super_glue/boolq/templates.yaml index 0fa8b0ac9..2b3c96c52 100644 --- a/promptsource/templates/super_glue/boolq/templates.yaml +++ b/promptsource/templates/super_glue/boolq/templates.yaml @@ -188,3 +188,17 @@ templates: original_task: true name: valid_binary reference: '' + ec88772c-e81e-4b8a-a77b-b75efd1c212a: !Template + answer_choices: False ||| True + id: ec88772c-e81e-4b8a-a77b-b75efd1c212a + jinja: 'I wonder {{question}}? Provide me a text that answers it (not too long). 
||| + {{passage}}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longprovidetext + reference: '' diff --git a/promptsource/templates/super_glue/multirc/templates.yaml b/promptsource/templates/super_glue/multirc/templates.yaml index 2107173d0..d15b7fd7e 100644 --- a/promptsource/templates/super_glue/multirc/templates.yaml +++ b/promptsource/templates/super_glue/multirc/templates.yaml @@ -183,3 +183,16 @@ templates: original_task: true name: "I was going to say\u2026" reference: '' + d3d78b88-8845-45b5-935a-6451da00b285: !Template + answer_choices: No ||| Yes + id: d3d78b88-8845-45b5-935a-6451da00b285 + jinja: "Given the question {{ question }}, write a paragraph explaining it. ||| {{ paragraph }}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Accuracy + original_task: true + name: "xp3longwritepara" + reference: '' diff --git a/promptsource/templates/wiki_hop/original/templates.yaml b/promptsource/templates/wiki_hop/original/templates.yaml index f1f620a3a..9b778f067 100644 --- a/promptsource/templates/wiki_hop/original/templates.yaml +++ b/promptsource/templates/wiki_hop/original/templates.yaml @@ -289,3 +289,19 @@ templates: name: choose_best_object_interrogative_2 reference: Given information and subject and relation, choose the best object entity (interrogative instruction). + f55936e1-cbde-4d41-b462-6150cce8c1c8: !Template + answer_choices: '{{candidates | join("|||")}}' + id: f55936e1-cbde-4d41-b462-6150cce8c1c8 + jinja: '{% set question_split = question.split('' '') %}Generate some paragraphs, + such that the entity of ''{{answer}}'' relates to ''{{ question_split[1:] | join(" ")}}'' + with the relationship of ''{{ question_split[0] | replace("_", " ")}}''. 
+ ||| {% for support in supports %} - {{ support }} {% endfor %}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longgenrelation + reference: '' diff --git a/promptsource/templates/wiqa/templates.yaml b/promptsource/templates/wiqa/templates.yaml index 66ec559a8..574353fe1 100644 --- a/promptsource/templates/wiqa/templates.yaml +++ b/promptsource/templates/wiqa/templates.yaml @@ -238,3 +238,20 @@ templates: original_task: false name: does_the_supposed_perturbation_have_an_effect reference: '' + a37313bd-94bb-47ab-82bf-538df1b1ad5f: !Template + answer_choices: yes ||| no + id: a37313bd-94bb-47ab-82bf-538df1b1ad5f + jinja: '{{ question_para_step | first }} + + What follows after the above? + + ||| - {{ question_para_step[1:] | join("\n- ") }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Accuracy + original_task: false + name: xp3longfollows + reference: '' diff --git a/promptsource/templates/xquad/xquad.en/templates.yaml b/promptsource/templates/xquad/xquad.en/templates.yaml index fb70ec27a..10ce485a5 100644 --- a/promptsource/templates/xquad/xquad.en/templates.yaml +++ b/promptsource/templates/xquad/xquad.en/templates.yaml @@ -125,3 +125,28 @@ templates: original_task: true name: read_passage reference: '' + f3d9bc66-1188-40d4-9ac9-17e0af50b788: !Template + answer_choices: null + id: f3d9bc66-1188-40d4-9ac9-17e0af50b788 + jinja: "{{question}} Apparently, it's {{answers.text[0]}}. Can you provide me some context? ||| {{context}}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - Squad + original_task: true + name: xp3longcontext + reference: '' + f3d9bd66-1188-40d4-9ac9-17e0af50b788: !Template + answer_choices: null + id: f3d9bd66-1188-40d4-9ac9-17e0af50b788 + jinja: "Given the answer {{answers.text[0]}} to the {{question}}, write a text that explains this. 
+ The answer should start at char number {{answers.answer_start[0]}}. Text: ||| {{context}}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: [] + metrics: + - Squad + original_task: true + name: xp3longchar + reference: '' diff --git a/promptsource/templates/xsum/templates.yaml b/promptsource/templates/xsum/templates.yaml index 568304510..27e729736 100644 --- a/promptsource/templates/xsum/templates.yaml +++ b/promptsource/templates/xsum/templates.yaml @@ -178,3 +178,31 @@ templates: original_task: true name: DOC_tldr reference: GPT-2 TLDR + d878b769-9da2-4d9d-9517-1edcca3b1b26: !Template + answer_choices: null + id: d878b769-9da2-4d9d-9517-1edcca3b1b26 + jinja: '{{summary}}\nGiven the above summary, what is a fitting news article for it? ||| {{document}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - ROUGE + - BLEU + original_task: true + name: xp3longfittingnews + reference: '' + d879b768-9da2-4d9d-9517-1edcca3b1b26: !Template + answer_choices: null + id: d879b768-9da2-4d9d-9517-1edcca3b1b26 + jinja: 'Generate a news article that can be summarized as "{{summary}}". Article: ||| {{document}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - ROUGE + - BLEU + original_task: true + name: xp3longsummarizedas + reference: '' diff --git a/promptsource/templates/yelp_review_full/templates.yaml b/promptsource/templates/yelp_review_full/templates.yaml index 84c027c17..b88fda960 100644 --- a/promptsource/templates/yelp_review_full/templates.yaml +++ b/promptsource/templates/yelp_review_full/templates.yaml @@ -121,3 +121,16 @@ templates: original_task: true name: format_rating reference: It's simulating the format of a webpage. 
+ e8191beb-c0fa-490d-9e0c-32eb6907dbc0: !Template + answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars + id: e8191beb-c0fa-490d-9e0c-32eb6907dbc0 + jinja: 'Imaginary {{ answer_choices[label] }} review: ||| {{ text }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - en + metrics: + - Accuracy + original_task: true + name: xp3longreview + reference: ''