<?xml version="1.0" encoding="UTF-8"?>
<codeBook xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xsi:schemaLocation="ddi:codebook:2_5 http://www.ddialliance.org/Specification/DDI-Codebook/2.5/XMLSchema/codebook.xsd" xmlns="ddi:codebook:2_5">
  <docDscr>
    <citation>
      <titlStmt>
        <titl xml:lang="sv">Multi-model Interreligious Roleplay Archive</titl>
        <altTitl>MIRA</altTitl>
        <parTitl xml:lang="en">Multi-model Interreligious Roleplay Archive</parTitl>
        <IDNo agency="SND">2026-94-1</IDNo>
        <IDNo agency="lnu.se">LNU-2024/990</IDNo>
        <IDNo agency="DOI">https://doi.org/10.5878/npy2-hk17</IDNo>
      </titlStmt>
      <prodStmt>
        <producer xml:lang="en" abbr="SND">Swedish National Data Service</producer>
        <producer xml:lang="sv" abbr="SND">Svensk nationell datatjänst</producer>
      </prodStmt>
      <holdings URI="https://doi.org/10.5878/npy2-hk17">Landing page</holdings>
    </citation>
  </docDscr>
  <stdyDscr>
    <citation>
      <titlStmt>
        <titl xml:lang="sv">Multi-model Interreligious Roleplay Archive</titl>
        <altTitl>MIRA</altTitl>
        <parTitl xml:lang="en">Multi-model Interreligious Roleplay Archive</parTitl>
        <IDNo agency="SND">2026-94-1</IDNo>
        <IDNo agency="lnu.se">LNU-2024/990</IDNo>
        <IDNo agency="DOI">https://doi.org/10.5878/npy2-hk17</IDNo>
      </titlStmt>
      <rspStmt>
        <AuthEnty xml:lang="en" affiliation="Linnaeus University">M. Kamal, Ahmad</AuthEnty>
        <AuthEnty xml:lang="sv" affiliation="Linnéuniversitetet">M. Kamal, Ahmad</AuthEnty>
        <AuthEnty xml:lang="en" affiliation="Institute of Information Studies and Librarianship, Charles University">Lipková, Helena</AuthEnty>
        <AuthEnty xml:lang="en" affiliation="Institute for Religious Studies and Religious Education, University of Bremen">Radde-Antweiler, Kerstin</AuthEnty>
        <AuthEnty xml:lang="en" affiliation="Department of Cultural Sciences, Linnaeus University">Svensson, Jonas</AuthEnty>
        <AuthEnty xml:lang="sv" affiliation="Institutionen för kulturvetenskaper, Linnéuniversitetet">Svensson, Jonas</AuthEnty>
      </rspStmt>
      <prodStmt>
        <grantNo xml:lang="sv" agency="Riksbankens jubileumsfond">P24-0313</grantNo>
        <grantNo xml:lang="en" agency="Ministry of Education, Youth and Sports of the Czech Republic (co-financed by the European Union)">CZ.02.01.01/00/23_025/0008691</grantNo>
      </prodStmt>
      <distStmt>
        <distrbtr xml:lang="en" abbr="SND" URI="https://snd.se">Swedish National Data Service</distrbtr>
        <distrbtr xml:lang="sv" abbr="SND" URI="https://snd.se">Svensk nationell datatjänst</distrbtr>
        <distDate xml:lang="en" date="2026-03-26" />
      </distStmt>
      <verStmt>
        <version elementVersion="1" elementVersionDate="2026-03-26" />
      </verStmt>
      <holdings URI="https://doi.org/10.5878/npy2-hk17">Landing page</holdings>
    </citation>
    <stdyInfo>
      <subject>
        <keyword xml:lang="en" vocab="YSO" vocabURI="http://www.yso.fi/onto/yso/p28959">religious interaction</keyword>
        <keyword xml:lang="sv" vocab="YSO" vocabURI="http://www.yso.fi/onto/yso/p28959">interreligiös samverkan</keyword>
        <keyword xml:lang="en" vocab="YSO" vocabURI="http://www.yso.fi/onto/yso/p40748">generative artificial intelligence</keyword>
        <keyword xml:lang="sv" vocab="YSO" vocabURI="http://www.yso.fi/onto/yso/p40748">generativ artificiell intelligens</keyword>
        <keyword xml:lang="en" vocab="YSO" vocabURI="http://www.yso.fi/onto/yso/p40609">prompts (artificial intelligence)</keyword>
        <keyword xml:lang="sv" vocab="YSO" vocabURI="http://www.yso.fi/onto/yso/p40609">promptar (artificiell intelligens)</keyword>
        <topcClas xml:lang="en" vocab="CESSDA Topic Classification" vocabURI="https://vocabularies.cessda.eu/vocabulary/TopicClassification?code=MediaCommunicationAndLanguage.Media">Media</topcClas>
        <topcClas xml:lang="sv" vocab="CESSDA Topic Classification" vocabURI="https://vocabularies.cessda.eu/vocabulary/TopicClassification?code=MediaCommunicationAndLanguage.Media">Media</topcClas>
        <topcClas xml:lang="en" vocab="CESSDA Topic Classification" vocabURI="https://vocabularies.cessda.eu/vocabulary/TopicClassification?code=MediaCommunicationAndLanguage.InformationSociety">Information society</topcClas>
        <topcClas xml:lang="sv" vocab="CESSDA Topic Classification" vocabURI="https://vocabularies.cessda.eu/vocabulary/TopicClassification?code=MediaCommunicationAndLanguage.InformationSociety">Informationssamhället</topcClas>
        <topcClas xml:lang="en" vocab="CESSDA Topic Classification" vocabURI="https://vocabularies.cessda.eu/vocabulary/TopicClassification?code=ScienceAndTechnology.InformationTechnology">Information technology</topcClas>
        <topcClas xml:lang="sv" vocab="CESSDA Topic Classification" vocabURI="https://vocabularies.cessda.eu/vocabulary/TopicClassification?code=ScienceAndTechnology.InformationTechnology">Informationsteknik</topcClas>
        <topcClas xml:lang="en" vocab="CESSDA Topic Classification" vocabURI="https://vocabularies.cessda.eu/vocabulary/TopicClassification?code=SocietyAndCulture.CulturalAndNationalIdentity">Cultural and national identity</topcClas>
        <topcClas xml:lang="sv" vocab="CESSDA Topic Classification" vocabURI="https://vocabularies.cessda.eu/vocabulary/TopicClassification?code=SocietyAndCulture.CulturalAndNationalIdentity">Kulturell och nationell identitet</topcClas>
        <topcClas xml:lang="en" vocab="CESSDA Topic Classification" vocabURI="https://vocabularies.cessda.eu/vocabulary/TopicClassification?code=SocietyAndCulture.ReligionAndValues">Religion and values</topcClas>
        <topcClas xml:lang="sv" vocab="CESSDA Topic Classification" vocabURI="https://vocabularies.cessda.eu/vocabulary/TopicClassification?code=SocietyAndCulture.ReligionAndValues">Religion och värderingar</topcClas>
      </subject>
      <abstract xml:lang="en" contentType="abstract">The dataset consists of 960 outputs collected in 2025 from various large language models (LLMs). The LLMs were given a religious question to answer as one of 12 theological personas (6 Christian and 6 Muslim). Four (4) different models were queried: Claude Sonnet 4, Gemini 2.5 Flash, GPT4o, and GPT5. Two (2) different channels were used for prompting and collecting outputs: API and web user-interface. Ten (10) outputs were collected for each combination of the preceding parameters. The data is available as an xlsx file. The prompt and personas are available as a PDF.</abstract>
      <abstract xml:lang="sv" contentType="abstract">Datamängden består av 960 svar som samlades in år 2025 från olika stora språkmodeller (LLM). Språkmodellerna fick en religiös fråga att besvara i rollen som en av 12 teologiska personligheter (6 kristna och 6 muslimska). Fyra (4) olika modeller tillfrågades: Claude Sonnet 4, Gemini 2.5 Flash, GPT4o och GPT5. Två (2) olika kanaler användes för att ställa frågor och samla in svar: API och webbgränssnitt. Tio (10) svar samlades in för varje kombination av de ovanstående parametrarna. Data finns tillgängliga som en xlsx-fil. Frågorna och rollfigurerna finns tillgängliga som en PDF-fil.</abstract>
      <sumDscr>
        <collDate xml:lang="en" date="2025-08-15" event="start">2025-08-15</collDate>
        <collDate xml:lang="en" date="2025-08-22" event="end">2025-08-22</collDate>
        <collDate xml:lang="en" date="2025-09-01" event="start">2025-09-01</collDate>
        <collDate xml:lang="en" date="2025-09-01" event="end">2025-09-01</collDate>
        <anlyUnit xml:lang="en" unit="Media unit: Text">Media unit: Text<concept vocab="DDI Analysis Unit" vocabURI="https://vocabularies.cessda.eu/v2/vocabularies/AnalysisUnit/2.1.3?languageVersion=en-2.1.3">Media unit: Text</concept></anlyUnit>
        <anlyUnit xml:lang="sv" unit="Mediaenhet: Text">Mediaenhet: Text<concept vocab="DDI Analysis Unit" vocabURI="https://vocabularies.cessda.eu/v2/vocabularies/AnalysisUnit/2.1.3?languageVersion=sv-2.1.3">Mediaenhet: Text</concept></anlyUnit>
        <anlyUnit xml:lang="en" unit="Event/Process/Activity">Event/Process/Activity<concept vocab="DDI Analysis Unit" vocabURI="https://vocabularies.cessda.eu/v2/vocabularies/AnalysisUnit/2.1.3?languageVersion=en-2.1.3">Event/Process/Activity</concept></anlyUnit>
        <anlyUnit xml:lang="sv" unit="Händelse/process/aktivitet">Händelse/process/aktivitet<concept vocab="DDI Analysis Unit" vocabURI="https://vocabularies.cessda.eu/v2/vocabularies/AnalysisUnit/2.1.3?languageVersion=sv-2.1.3">Händelse/process/aktivitet</concept></anlyUnit>
        <universe xml:lang="en">Outputs from large language models</universe>
        <dataKind xml:lang="en">Text</dataKind>
      </sumDscr>
    </stdyInfo>
    <method>
      <dataColl>
        <timeMeth xml:lang="en">Cross-section<concept vocab="DDI Time Method" vocabURI="https://vocabularies.cessda.eu/v2/vocabularies/TimeMethod/1.2.3?languageVersion=en-1.2.3">Cross-section</concept></timeMeth>
        <timeMeth xml:lang="sv">Tvärsnitt<concept vocab="DDI Time Method" vocabURI="https://vocabularies.cessda.eu/v2/vocabularies/TimeMethod/1.2.3?languageVersion=sv-1.2.3">Tvärsnitt</concept></timeMeth>
        <sampProc xml:lang="en">Four prominent and current LLMs were employed: OpenAI's GPT-4o and GPT-5, Anthropic’s Claude 4.1 and Google’s Gemini 2.5. Twelve personas were created, six within the Christian tradition and six within Islam; the personas from each religion are not meant to correspond to one another, but offer a comparable range of internal diversity for assessing large language models' performances (based on the expertise of the religious scholars involved). Two channels for data collection were selected for comparison: the web user interfaces (e.g. ChatGPT) and APIs. Finally, under the conditions above, an output for the given prompt was collected 10 times.<concept vocab="DDI Sampling Procedure" vocabURI="https://vocabularies.cessda.eu/v2/vocabularies/SamplingProcedure/2.0.1?languageVersion=en-2.0.1">Four prominent and current LLMs were employed: OpenAI's GPT-4o and GPT-5, Anthropic’s Claude 4.1 and Google’s Gemini 2.5. Twelve personas were created, six within the Christian tradition and six within Islam; the personas from each religion are not meant to correspond to one another, but offer a comparable range of internal diversity for assessing large language models' performances (based on the expertise of the religious scholars involved). Two channels for data collection were selected for comparison: the web user interfaces (e.g. ChatGPT) and APIs. Finally, under the conditions above, an output for the given prompt was collected 10 times.</concept></sampProc>
        <sampProc xml:lang="en">Non-probability: Quota<concept vocab="DDI Sampling Procedure" vocabURI="https://vocabularies.cessda.eu/v2/vocabularies/SamplingProcedure/2.0.1?languageVersion=en-2.0.1">Non-probability: Quota</concept></sampProc>
        <sampProc xml:lang="sv">Icke-sannolikhetsurval: kvoturval<concept vocab="DDI Sampling Procedure" vocabURI="https://vocabularies.cessda.eu/v2/vocabularies/SamplingProcedure/2.0.1?languageVersion=sv-2.0.1">Icke-sannolikhetsurval: kvoturval</concept></sampProc>
        <collMode xml:lang="en">Web-scraping conducted within the default web environment of each model using anonymous browser windows to ensure that responses were not influenced by any prior conversational context. The free-of-charge versions of the chatbots were used in the experiment. When the limits of the conversation were reached, the researcher waited for a new free time slot to continue the prompting. Furthermore, the temperature was not increased and remained at its default values, and the thinking mode feature was not activated to maintain a standard baseline response. The models were prompted to keep their answers as close to 500 tokens as possible without interrupting the thought chain. Responses to prompts were copied and saved in an Excel sheet that was later shared with all other members of the research team.<concept vocab="DDI Mode of Collection" vocabURI="https://vocabularies.cessda.eu/v2/vocabularies/ModeOfCollection/5.0.0?languageVersion=en-5.0.0">Web-scraping conducted within the default web environment of each model using anonymous browser windows to ensure that responses were not influenced by any prior conversational context. The free-of-charge versions of the chatbots were used in the experiment. When the limits of the conversation were reached, the researcher waited for a new free time slot to continue the prompting. Furthermore, the temperature was not increased and remained at its default values, and the thinking mode feature was not activated to maintain a standard baseline response. The models were prompted to keep their answers as close to 500 tokens as possible without interrupting the thought chain. Responses to prompts were copied and saved in an Excel sheet that was later shared with all other members of the research team.</concept></collMode>
        <collMode xml:lang="en">Automated data extraction: Web scraping<concept vocab="DDI Mode of Collection" vocabURI="https://vocabularies.cessda.eu/v2/vocabularies/ModeOfCollection/5.0.0?languageVersion=en-5.0.0">Automated data extraction: Web scraping</concept></collMode>
        <collMode xml:lang="sv">Automatiserad dataextrahering: webbskrapning<concept vocab="DDI Mode of Collection" vocabURI="https://vocabularies.cessda.eu/v2/vocabularies/ModeOfCollection/5.0.0?languageVersion=sv-5.0.0">Automatiserad dataextrahering: webbskrapning</concept></collMode>
        <collMode xml:lang="en">API extraction was conducted without a token-length restriction, allowing the models to determine response length. Sub-datasets from individual models where responses were cut off have been saved, but do not appear in the final dataset. A middle temperature setting was chosen across all models and all runs, leaving temperature as a variable for future research.<concept vocab="DDI Mode of Collection" vocabURI="https://vocabularies.cessda.eu/v2/vocabularies/ModeOfCollection/5.0.0?languageVersion=en-5.0.0">API extraction was conducted without a token-length restriction, allowing the models to determine response length. Sub-datasets from individual models where responses were cut off have been saved, but do not appear in the final dataset. A middle temperature setting was chosen across all models and all runs, leaving temperature as a variable for future research.</concept></collMode>
        <collMode xml:lang="en">Automated data extraction: Web scraping<concept vocab="DDI Mode of Collection" vocabURI="https://vocabularies.cessda.eu/v2/vocabularies/ModeOfCollection/5.0.0?languageVersion=en-5.0.0">Automated data extraction: Web scraping</concept></collMode>
        <collMode xml:lang="sv">Automatiserad dataextrahering: webbskrapning<concept vocab="DDI Mode of Collection" vocabURI="https://vocabularies.cessda.eu/v2/vocabularies/ModeOfCollection/5.0.0?languageVersion=sv-5.0.0">Automatiserad dataextrahering: webbskrapning</concept></collMode>
      </dataColl>
    </method>
    <dataAccs>
      <useStmt>
        <restrctn xml:lang="en">Access to data through SND. Data are freely accessible.</restrctn>
        <restrctn xml:lang="sv">Åtkomst till data via SND. Data är fritt tillgängliga.</restrctn>
        <conditions elementVersion="info:eu-repo-Access-Terms vocabulary">openAccess</conditions>
      </useStmt>
    </dataAccs>
    <othrStdyMat />
  </stdyDscr>
</codeBook>