<?xml version="1.0" encoding="UTF-8" ?>
<!-- Created from PDF via Acrobat SaveAsXML -->
<!-- Mapping Table version: 28-February-2003 -->
<TaggedPDF-doc>
<?xpacket begin='﻿' id='W5M0MpCehiHzreSzNTczkc9d'?>
<x:xmpmeta xmlns:x="adobe:ns:meta/" x:xmptk="Adobe XMP Core 9.1-c001 79.675d0f7, 2023/06/11-19:21:16        ">
   <rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
      <rdf:Description rdf:about=""
            xmlns:dc="http://purl.org/dc/elements/1.1/"
            xmlns:xmp="http://ns.adobe.com/xap/1.0/"
            xmlns:xmpMM="http://ns.adobe.com/xap/1.0/mm/"
            xmlns:stRef="http://ns.adobe.com/xap/1.0/sType/ResourceRef#"
            xmlns:stEvt="http://ns.adobe.com/xap/1.0/sType/ResourceEvent#"
            xmlns:xmpRights="http://ns.adobe.com/xap/1.0/rights/"
            xmlns:photoshop="http://ns.adobe.com/photoshop/1.0/"
            xmlns:pdf="http://ns.adobe.com/pdf/1.3/">
         <dc:format>application/xml</dc:format>
         <dc:creator>
            <rdf:Seq>
               <rdf:li>Erna Oliver</rdf:li>
               <rdf:li>Geesje van den Berg</rdf:li>
            </rdf:Seq>
         </dc:creator>
         <dc:rights>
            <rdf:Alt>
               <rdf:li xml:lang="x-default">Creative Commons Attribution 4.0 International Public License</rdf:li>
            </rdf:Alt>
         </dc:rights>
         <dc:description>
            <rdf:Alt>
               <rdf:li xml:lang="x-default">The nineteen chapters in this two-volume publication offer insights into both challenges and opportunities for working with AI in general and Gen-AI in particular. Overall, the argument that emerges is for a reasoned engagement with the evolving possibilities of the new technology, but as Mark Twain is credited with observing, gradual improvement is probably going to be more useful than delayed perfection.</rdf:li>
            </rdf:Alt>
         </dc:description>
         <dc:title>
            <rdf:Alt>
               <rdf:li xml:lang="x-default">Artificial Intelligence Transforming Higher Education Vol 2</rdf:li>
            </rdf:Alt>
         </dc:title>
         <xmp:CreateDate>2025-08-14T14:27:05+02:00</xmp:CreateDate>
         <xmp:MetadataDate>2025-08-29T12:55:56+02:00</xmp:MetadataDate>
         <xmp:ModifyDate>2025-08-29T12:55:56+02:00</xmp:ModifyDate>
         <xmp:CreatorTool>Adobe InDesign 20.5 (Windows)</xmp:CreatorTool>
         <xmpMM:InstanceID>uuid:87000788-3871-4a8f-b571-978d8ab5f085</xmpMM:InstanceID>
         <xmpMM:OriginalDocumentID>xmp.did:019c3093-df57-154c-812a-a23dabd05243</xmpMM:OriginalDocumentID>
         <xmpMM:DocumentID>xmp.id:26b62254-79c8-3c47-a3e3-884c1241baae</xmpMM:DocumentID>
         <xmpMM:RenditionClass>proof:pdf</xmpMM:RenditionClass>
         <xmpMM:DerivedFrom rdf:parseType="Resource">
            <stRef:instanceID>xmp.iid:eb410db0-2a04-724d-8929-f1f30fb7dd19</stRef:instanceID>
            <stRef:documentID>xmp.did:02ada829-62af-0246-bf70-c20742a93d96</stRef:documentID>
            <stRef:originalDocumentID>xmp.did:019c3093-df57-154c-812a-a23dabd05243</stRef:originalDocumentID>
            <stRef:renditionClass>default</stRef:renditionClass>
         </xmpMM:DerivedFrom>
         <xmpMM:History>
            <rdf:Seq>
               <rdf:li rdf:parseType="Resource">
                  <stEvt:action>converted</stEvt:action>
                  <stEvt:parameters>from application/x-indesign to application/pdf</stEvt:parameters>
                  <stEvt:softwareAgent>Adobe InDesign 20.5 (Windows)</stEvt:softwareAgent>
                  <stEvt:changed>/</stEvt:changed>
                  <stEvt:when>2025-08-14T14:27:05+02:00</stEvt:when>
               </rdf:li>
            </rdf:Seq>
         </xmpMM:History>
         <xmpRights:Marked>True</xmpRights:Marked>
         <xmpRights:WebStatement>https://creativecommons.org/licenses/by/4.0</xmpRights:WebStatement>
         <photoshop:City>Johannesburg</photoshop:City>
         <photoshop:State>Gauteng</photoshop:State>
         <photoshop:Country>South Africa</photoshop:Country>
         <pdf:Producer>Adobe PDF Library 17.0</pdf:Producer>
         <pdf:Trapped>False</pdf:Trapped>
      </rdf:Description>
   </rdf:RDF>
</x:xmpmeta>
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                                                                                                    
                           
<?xpacket end="w"?>
<bookmark-tree>
<bookmark title="Abbreviations">
<destination structID="LinkTarget_10751"/>
</bookmark>
<bookmark title="Foreword">
<destination structID="LinkTarget_10752"/>
</bookmark>
<bookmark title="When the rhythm of the music changes, the dance step must change also">
<destination structID="LinkTarget_10752"/>
<bookmark title="Tony J. Mays">
<destination structID="LinkTarget_10752"/>
</bookmark>
</bookmark>
<bookmark title="Section 1">
<destination structID="LinkTarget_10753"/>
</bookmark>
<bookmark title="Ethics, Integrity, and AI in Higher Education: Navigating Challenges and Shaping Futures">
<destination structID="LinkTarget_10753"/>
</bookmark>
<bookmark title="Chapter 1">
<destination structID="LinkTarget_10754"/>
</bookmark>
<bookmark title="Navigating Academic Anxiety and Fostering Integrity in the Age of GPT Detectors">
<destination structID="LinkTarget_10754"/>
<bookmark title="Samuel Amponsah">
<destination structID="LinkTarget_10754"/>
</bookmark>
<bookmark title="Micheal M van Wyk">
<destination structID="LinkTarget_10754"/>
</bookmark>
<bookmark title="Michael A Adarkwah">
<destination structID="LinkTarget_10754"/>
</bookmark>
</bookmark>
<bookmark title="Chapter 2">
<destination structID="LinkTarget_10755"/>
</bookmark>
<bookmark title="AI and Academic Integrity Decussation: Implications for Higher Education">
<destination structID="LinkTarget_10755"/>
<bookmark title="Nicky Tjano">
<destination structID="LinkTarget_10755"/>
</bookmark>
</bookmark>
<bookmark title="Chapter 3">
<destination structID="LinkTarget_10756"/>
</bookmark>
<bookmark title="Critical Considerations for Establishing a Link between Artificial Intelligence and Quality Assurance in Higher Education">
<destination structID="LinkTarget_10756"/>
<bookmark title="Faiza Gani">
<destination structID="LinkTarget_10756"/>
</bookmark>
</bookmark>
<bookmark title="Chapter 4">
<destination structID="LinkTarget_10757"/>
</bookmark>
<bookmark title="Artificial Intelligence and the Ethics of Tomorrow: Tracing the Shift in Information Ethics through History">
<destination structID="LinkTarget_10757"/>
<bookmark title="Brenda van Wyk">
<destination structID="LinkTarget_10757"/>
</bookmark>
<bookmark title="Marlene Holmner">
<destination structID="LinkTarget_10757"/>
</bookmark>
</bookmark>
<bookmark title="Chapter 5">
<destination structID="LinkTarget_10758"/>
</bookmark>
<bookmark title="Unpacking the Role of Big Data, Artificial Intelligence, and Predictive Analytics in Education: Implications for Educators and Research Ethics Review Committees">
<destination structID="LinkTarget_10758"/>
<bookmark title="Nicky Tjano">
<destination structID="LinkTarget_10758"/>
</bookmark>
</bookmark>
<bookmark title="Section 2">
<destination structID="LinkTarget_10759"/>
</bookmark>
<bookmark title="Generative AI in Higher Education: Revolutionising Teaching, Assessment, and Integrity">
<destination structID="LinkTarget_10759"/>
</bookmark>
<bookmark title="Chapter 6">
<destination structID="LinkTarget_10760"/>
</bookmark>
<bookmark title="Transforming Higher Education: 
The Profound Impact of Generative Artificial Intelligence on Teaching and Learning in the ChatGPT Era">
<destination structID="LinkTarget_10760"/>
<bookmark title="Geesje Van den Berg">
<destination structID="LinkTarget_10760"/>
</bookmark>
</bookmark>
<bookmark title="Chapter 7">
<destination structID="LinkTarget_10761"/>
</bookmark>
<bookmark title="Perceiving Generative AI through Educators’ Eyes: Benefits and Challenges">
<destination structID="LinkTarget_10761"/>
<bookmark title="Geesje Van den Berg">
<destination structID="LinkTarget_10761"/>
</bookmark>
<bookmark title="Patience K. Mudau">
<destination structID="LinkTarget_10761"/>
</bookmark>
</bookmark>
<bookmark title="Chapter 8">
<destination structID="LinkTarget_10762"/>
</bookmark>
<bookmark title="Artificial Intelligence such as Generative Pretrained Transformer: Reimagining Online Assessment Methods">
<destination structID="LinkTarget_10762"/>
<bookmark title="Elize C. du Plessis">
<destination structID="LinkTarget_10762"/>
</bookmark>
</bookmark>
<bookmark title="Chapter 9">
<destination structID="LinkTarget_10763"/>
</bookmark>
<bookmark title="Is ChatGPT Capable of Generating Accurate Information?
Academics Employ Strategies to Prevent Academic Dishonesty at a Faculty of Education">
<destination structID="LinkTarget_10763"/>
<bookmark title="Micheal M van Wyk">
<destination structID="LinkTarget_10763"/>
</bookmark>
</bookmark>
</bookmark-tree>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_0.jpg"/>
</Figure>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_1.jpg"/>
</Figure>

<Part>
<H1>Artificial Intelligence Transforming Higher Education </H1>

<P>Volume 2 </P>

<P>Geesje van den Berg, Erna Oliver (Eds) </P>

<Sect><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_2.jpg"/>
</Figure>
</Sect>

<P>Artificial Intelligence Transforming Higher Education: Volume 2 </P>

<P>Published by UJ Press University of Johannesburg Library Auckland Park Kingsway Campus PO Box 524 Auckland Park 2006 
<Link>https://ujpress.uj.ac.za/ </Link>
</P>

<P>Compilation © Geesje van den Berg, Erna Oliver 2025 Chapters © Author(s) 2025 Published Edition © Geesje van den Berg, Erna Oliver 2025 </P>

<P>First published 2025 </P>

<P>
<Link>https://doi.org/10.64449/9780639889917</Link>
978-0-6398899-0-0 (Paperback) 978-0-6398899-1-7 (PDF) 978-0-6398899-2-4 (EPUB) 978-0-6398899-3-1 (XML) </P>

<P>This publication has been submitted to a rigorous double-blind peer-review process prior to publication and all recommendations by the reviewers were considered and implemented before publication. </P>

<P>Proofreading: Willem Oliver Cover design: Hester Roets, UJ Graphic Design Studio Typeset in 9/13pt Merriweather Light </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_3.jpg"/>
</Figure>
</Link>
</Part>

<Part>
<H1>Contents </H1>

<Table>
<TR>
<TD>
<Link>Abbreviations ......................................................................................... </Link>
</TD>

<TD>i </TD>
</TR>

<TR>
<TD>
<Link>Foreword: When the rhythm of the music changes, the dance step must change also ............................................................ Tony J. Mays </Link>
</TD>

<TD>ix </TD>
</TR>

<TR>
<TD>
<Link>Section 1: Ethics, Integrity, and AI in Higher Education: Navigating Challenges and Shaping Futures ....................... </Link>
</TD>

<TD>1 </TD>
</TR>

<TR>
<TD>
<Link>Chapter 1: Navigating Academic Anxiety and Fostering Integrity in the Age of GPT Detectors ............................................ Samuel Amponsah, Micheal M van Wyk, Michael A Adarkwah </Link>
</TD>

<TD>3 </TD>
</TR>

<TR>
<TD>
<Link>Chapter 2: AI and Academic Integrity Decussation: Implications for Higher Education ................................................. Nicky Tjano </Link>
</TD>

<TD>29 </TD>
</TR>

<TR>
<TD>
<Link>Chapter 3: Critical Considerations for Establishing a Link between Artificial Intelligence and Quality Assurance in Higher Education .................................................................................. Faiza Gani </Link>
</TD>

<TD>77 </TD>
</TR>

<TR>
<TD>
<Link>Chapter 4: Artificial Intelligence and the Ethics of Tomorrow: Tracing the Shift in Information Ethics through History ..................................................................................... Brenda van Wyk, Marlene Holmner </Link>
</TD>

<TD>101 </TD>
</TR>

<TR>
<TD>
<Link>Chapter 5: Unpacking the Role of Big Data, Artificial Intelligence, and Predictive Analytics in Education: Implications for Educators and Research Ethics Review Committees ............................................................................................. </Link>
</TD>

<TD>141 </TD>
</TR>

<TR>
<TD>
<Link>Nicky Tjano </Link>
</TD>

<TD/>
</TR>

<TR>
<TD>
<Link>Section 2: Generative AI in Higher Education: Revolutionising Teaching, Assessment, and Integrity ....... </Link>
</TD>

<TD>191 </TD>
</TR>

<TR>
<TD>
<Link>Chapter 6: Transforming Higher Education: The Profound Impact of Generative Artificial Intelligence on Teaching and Learning in the ChatGPT Era ........................... Geesje van den Berg </Link>
</TD>

<TD>193 </TD>
</TR>

<TR>
<TD>
<Link>Chapter 7: Perceiving Generative AI through Educators’ Eyes: Benefits and Challenges .......................................................... Geesje van den Berg, Patience K. Mudau </Link>
</TD>

<TD>219 </TD>
</TR>

<TR>
<TD>
<Link>Chapter 8: Artificial Intelligence such as Generative Pretrained Transformer: Reimagining Online Assessment Methods ................................................................................................... </Link>
</TD>

<TD>245 </TD>
</TR>

<TR>
<TD>
<Link>Elize C. du Plessis </Link>
</TD>

<TD/>
</TR>

<TR>
<TD>
<Link>Chapter 9: Is ChatGPT Capable of Generating Accurate Information? Academics Employ Strategies to Prevent Academic Dishonesty at a Faculty of Education ......................... Micheal M van Wyk </Link>
</TD>

<TD>275 </TD>
</TR>
</Table>

<Sect>
<H2 id="LinkTarget_10751">Abbreviations </H2>

<Table>
<TR>
<TD>Abbreviation </TD>

<TD>Term </TD>
</TR>

<TR>
<TD>3-D </TD>

<TD>Three-Dimensional </TD>
</TR>

<TR>
<TD>4IR </TD>

<TD>Fourth Industrial Revolution </TD>
</TR>

<TR>
<TD>5IR </TD>

<TD>Fifth Industrial Revolution </TD>
</TR>

<TR>
<TD>6IR </TD>

<TD>Sixth Industrial Revolution </TD>
</TR>

<TR>
<TD>7IR </TD>

<TD>Seventh Industrial Revolution </TD>
</TR>

<TR>
<TD>AAG </TD>

<TD>Automated Article Generator </TD>
</TR>

<TR>
<TD>AD </TD>

<TD>Academic Dishonesty </TD>
</TR>

<TR>
<TD>ADB </TD>

<TD>African Development Bank </TD>
</TR>

<TR>
<TD>Admin </TD>

<TD>Administration </TD>
</TR>

<TR>
<TD>AES </TD>

<TD>Automated Essay Scoring </TD>
</TR>

<TR>
<TD>AGI </TD>

<TD>Artificial General Intelligence </TD>
</TR>

<TR>
<TD>AI </TD>

<TD>Artificial Intelligence </TD>
</TR>

<TR>
<TD>AIED </TD>

<TD>Artificial Intelligence in Education </TD>
</TR>

<TR>
<TD>AI-ML </TD>

<TD>Artificial Intelligence and Machine Learning </TD>
</TR>

<TR>
<TD>AIWBE </TD>

<TD>Adaptive and Intelligent Web-Based Educational System </TD>
</TR>

<TR>
<TD>ALICE </TD>

<TD>Artificial Linguistic Internet Computer Entity </TD>
</TR>

<TR>
<TD>ALS </TD>

<TD>Adaptive Learning System </TD>
</TR>

<TR>
<TD>ANN </TD>

<TD>Artificial Neural Network </TD>
</TR>

<TR>
<TD>APFEI </TD>

<TD>Assessment of Process for Efficiency Improvement </TD>
</TR>

<TR>
<TD>API </TD>

<TD>Academic Performance Index </TD>
</TR>

<TR>
<TD>APT </TD>

<TD>Automated Paraphrasing Tool </TD>
</TR>

<TR>
<TD>AR </TD>

<TD>Augmented Reality </TD>
</TR>

<TR>
<TD>ASIS </TD>

<TD>The American Society for Information Science </TD>
</TR>

<TR>
<TD>AT </TD>

<TD>Auto Tech </TD>
</TR>

<TR>
<TD>ATS </TD>

<TD>Aided Teaching System </TD>
</TR>

<TR>
<TD>AWA </TD>

<TD>Automated Writing Assistant </TD>
</TR>

<TR>
<TD>AWE </TD>

<TD>Automated Writing Evaluation </TD>
</TR>

<TR>
<TD>Abbreviation </TD>

<TD>Term </TD>
</TR>

<TR>
<TD>AWG </TD>

<TD>Australian Writers’ Guild </TD>
</TR>

<TR>
<TD>AWT </TD>

<TD>Automated Writing Tool </TD>
</TR>

<TR>
<TD>BBC </TD>

<TD>British Broadcasting Corporation </TD>
</TR>

<TR>
<TD>BMI </TD>

<TD>Brain-Machine Interface </TD>
</TR>

<TR>
<TD>BYOD </TD>

<TD>Bring Your Own Device </TD>
</TR>

<TR>
<TD>CAAF </TD>

<TD>The Collaborative Assessment Analytical Framework </TD>
</TR>

<TR>
<TD>CAI </TD>

<TD>Computer-Aided Instruction Systems </TD>
</TR>

<TR>
<TD>CALL </TD>

<TD>Computer-Assisted Language Learning </TD>
</TR>

<TR>
<TD>CAPS </TD>

<TD>The South African Curriculum and Assessment Policy Statement </TD>
</TR>

<TR>
<TD>CAT </TD>

<TD>Computerised Adaptive Test </TD>
</TR>

<TR>
<TD>CCR </TD>

<TD>The Center for Curriculum Redesign </TD>
</TR>

<TR>
<TD>CF </TD>

<TD>Cooperative Filtering </TD>
</TR>

<TR>
<TD>ChatGPT </TD>

<TD>Chat Generative Pre-Trained Transformer </TD>
</TR>

<TR>
<TD>CHE </TD>

<TD>Council on Higher Education </TD>
</TR>

<TR>
<TD>CHELSA </TD>

<TD>The Committee of Higher Education Libraries of South Africa </TD>
</TR>

<TR>
<TD>CIT </TD>

<TD>Computers and Information Technology </TD>
</TR>

<TR>
<TD>CMCSS </TD>

<TD>Canadian Mission Control Space Services </TD>
</TR>

<TR>
<TD>cMOOC </TD>

<TD>‘Connectivist’ or ‘Constructivist’ Massive Open Online Course </TD>
</TR>

<TR>
<TD>CMU </TD>

<TD>Carnegie Mellon University </TD>
</TR>

<TR>
<TD>CNN </TD>

<TD>Convolution Neural Network </TD>
</TR>

<TR>
<TD>COL </TD>

<TD>Commonwealth of Learning </TD>
</TR>

<TR>
<TD>COMEST </TD>

<TD>Commission Mondiale D’éthique des Connaissances Scientifiques et des Technologies (The World Commission on the Ethics of Scientific Knowledge and Technology) </TD>
</TR>

<TR>
<TD>COVID-19 </TD>

<TD>Coronavirus disease of 2019 </TD>
</TR>

<TR>
<TD>DBTS </TD>

<TD>Dialogue-Based Tutoring System </TD>
</TR>

<TR>
<TD>DevOps </TD>

<TD>Development Operations </TD>
</TR>

<TR>
<TD>DHET </TD>

<TD>Department of Higher Education and Training </TD>
</TR>

<TR>
<TD>DL </TD>

<TD>Deep Learning </TD>
</TR>

<TR>
<TD>Abbreviation </TD>

<TD>Term </TD>
</TR>

<TR>
<TD>DM </TD>

<TD>Data Mining </TD>
</TR>

<TR>
<TD>DoE </TD>

<TD>Department of Education </TD>
</TR>

<TR>
<TD>DSRPAI </TD>

<TD>Dartmouth Summer Research Project on Artificial Intelligence </TD>
</TR>

<TR>
<TD>DWA </TD>

<TD>Digital Writing Assistant </TD>
</TR>

<TR>
<TD>E4.0 </TD>

<TD>Education 4.0 </TD>
</TR>

<TR>
<TD>E5.0 </TD>

<TD>Education 5.0 </TD>
</TR>

<TR>
<TD>E-A-T </TD>

<TD>Expertise, Authority, and Trustworthiness </TD>
</TR>

<TR>
<TD>ECTS </TD>

<TD>European Credit Transfer and Accumulation System </TD>
</TR>

<TR>
<TD>EDM </TD>

<TD>Educational Data Mining </TD>
</TR>

<TR>
<TD>EdTech </TD>

<TD>Educational Technology </TD>
</TR>

<TR>
<TD>edX Mooc </TD>

<TD>Electronic Data Exchange Massive Open Online Course </TD>
</TR>

<TR>
<TD>EFL </TD>

<TD>English (as a) Foreign Language </TD>
</TR>

<TR>
<TD>EI </TD>

<TD>Emotional Intelligence </TD>
</TR>

<TR>
<TD>ELE </TD>

<TD>Exploratory Learning Environment </TD>
</TR>

<TR>
<TD>ETS </TD>

<TD>The Educational Testing Service </TD>
</TR>

<TR>
<TD>EU </TD>

<TD>European Union </TD>
</TR>

<TR>
<TD>FAFSA </TD>

<TD>Free Application for Federal Student Aid </TD>
</TR>

<TR>
<TD>FIP </TD>

<TD>Fair Information Practice </TD>
</TR>

<TR>
<TD>GAIED </TD>

<TD>Generative Artificial Intelligence for Education </TD>
</TR>

<TR>
<TD>GAN </TD>

<TD>Generative Adversarial Network </TD>
</TR>

<TR>
<TD>GDPR </TD>

<TD>General Data Protection Regulation </TD>
</TR>

<TR>
<TD>GEM </TD>

<TD>Global Education Monitoring </TD>
</TR>

<TR>
<TD>Gen-AI </TD>

<TD>Generative Artificial Intelligence </TD>
</TR>

<TR>
<TD>GIGO </TD>

<TD>Garbage in, Garbage out </TD>
</TR>

<TR>
<TD>GMAT </TD>

<TD>The Graduate Management Admission Test </TD>
</TR>

<TR>
<TD>GPT </TD>

<TD>Generative Pre-Trained Transformer </TD>
</TR>

<TR>
<TD>GRE </TD>

<TD>The Graduate Record Examination </TD>
</TR>

<TR>
<TD>H5P </TD>

<TD>HTML5 Package </TD>
</TR>

<TR>
<TD>HCI </TD>

<TD>Human-Computer Interface/Interaction </TD>
</TR>

<TR>
<TD>HE </TD>

<TD>Higher Education </TD>
</TR>

<TR>
<TD>Abbreviation </TD>

<TD>Term </TD>
</TR>

<TR>
<TD>HERANA </TD>

<TD>The Higher Education Research and Advocacy Network in Africa </TD>
</TR>

<TR>
<TD>HITL </TD>

<TD>Human-in-the-Loop </TD>
</TR>

<TR>
<TD>HLEG </TD>

<TD>The High-Level Expert Group on Artificial Intelligence </TD>
</TR>

<TR>
<TD>HoTEL </TD>

<TD>Holistic Approach to Technology Enhanced Learning </TD>
</TR>

<TR>
<TD>I4R </TD>

<TD>Industry 4.0 </TD>
</TR>

<TR>
<TD>I5R </TD>

<TD>Industry 5.0 </TD>
</TR>

<TR>
<TD>IALS </TD>

<TD>Individual Adaptive Learning System </TD>
</TR>

<TR>
<TD>IAS </TD>

<TD>Institute Administration System </TD>
</TR>

<TR>
<TD>IBM </TD>

<TD>International Business Machines </TD>
</TR>

<TR>
<TD>ICAF </TD>

<TD>The Intelligent Collaborative Assessment Framework </TD>
</TR>

<TR>
<TD>ICAI </TD>

<TD>International Centre for Academic Integrity </TD>
</TR>

<TR>
<TD>ICIE </TD>

<TD>International Center for Information Ethics </TD>
</TR>

<TR>
<TD>ICT </TD>

<TD>Information and Communication Technology </TD>
</TR>

<TR>
<TD>ID3 </TD>

<TD>Iterative Dichotomiser 3 </TD>
</TR>

<TR>
<TD>IDG </TD>

<TD>Inner Development Goal </TD>
</TR>

<TR>
<TD>IE </TD>

<TD>Information Ethics </TD>
</TR>

<TR>
<TD>IEA </TD>

<TD>The Intelligent Essay Assessor </TD>
</TR>

<TR>
<TD>IEEE </TD>

<TD>Institute of Electrical and Electronics Engineers </TD>
</TR>

<TR>
<TD>IES </TD>

<TD>Intelligent Educational System </TD>
</TR>

<TR>
<TD>IFC </TD>

<TD>International Finance Corporation </TD>
</TR>

<TR>
<TD>IFLA </TD>

<TD>The International Federation of Library Associations and Institutions </TD>
</TR>

<TR>
<TD>IHE </TD>

<TD>Institution of Higher Education </TD>
</TR>

<TR>
<TD>IIoT </TD>

<TD>Industrial Internet of Things </TD>
</TR>

<TR>
<TD>ILE </TD>

<TD>Interactive Learning Environment </TD>
</TR>

<TR>
<TD>ILO </TD>

<TD>International Labour Organization </TD>
</TR>

<TR>
<TD>IoDSA </TD>

<TD>Institute of Directors South Africa </TD>
</TR>

<TR>
<TD>IoT </TD>

<TD>Internet of Things </TD>
</TR>

<TR>
<TD>IP </TD>

<TD>Intellectual Property </TD>
</TR>

<TR>
<TD>IPA </TD>

<TD>Interpretative Phenomenology Analysis </TD>
</TR>

<TR>
<TD>Abbreviation </TD>

<TD>Term </TD>
</TR>

<TR>
<TD>ISS </TD>

<TD>International Space Station </TD>
</TR>

<TR>
<TD>IT </TD>

<TD>Information Technology </TD>
</TR>

<TR>
<TD>ITS </TD>

<TD>Intelligent Tutoring System </TD>
</TR>

<TR>
<TD>IWBE </TD>

<TD>Intelligent Web-Based Education </TD>
</TR>

<TR>
<TD>JRC </TD>

<TD>Joint Research Centre </TD>
</TR>

<TR>
<TD>KPMG </TD>

<TD>Klynveld, Peat, Marwick, and Goerdeler </TD>
</TR>

<TR>
<TD>LA </TD>

<TD>Learning Analytics </TD>
</TR>

<TR>
<TD>LEK </TD>

<TD>Lawrence, Evans, and Koch </TD>
</TR>

<TR>
<TD>LINC </TD>

<TD>Logic and Information Network Compiler </TD>
</TR>

<TR>
<TD>LLM </TD>

<TD>Large Language Model </TD>
</TR>

<TR>
<TD>LMS </TD>

<TD>Learning Management System </TD>
</TR>

<TR>
<TD>LPM </TD>

<TD>Language Processing Model </TD>
</TR>

<TR>
<TD>LSA </TD>

<TD>Latent Semantic Analysis </TD>
</TR>

<TR>
<TD>LSTM </TD>

<TD>Long-Short Term Memory </TD>
</TR>

<TR>
<TD>MIT </TD>

<TD>Massachusetts Institute of Technology </TD>
</TR>

<TR>
<TD>ML </TD>

<TD>Machine Learning </TD>
</TR>

<TR>
<TD>MOOC </TD>

<TD>Massive Open Online Course </TD>
</TR>

<TR>
<TD>NLG </TD>

<TD>Natural Language Generation </TD>
</TR>

<TR>
<TD>NLP </TD>

<TD>Natural Language Processing </TD>
</TR>

<TR>
<TD>NLU </TD>

<TD>Natural Language Understanding </TD>
</TR>

<TR>
<TD>NNAPI </TD>

<TD>Neural Networks Application Programming Interface </TD>
</TR>

<TR>
<TD>NOAI </TD>

<TD>Natural Organic Artificial Intelligence </TD>
</TR>

<TR>
<TD>NQF </TD>

<TD>National Qualifications Framework </TD>
</TR>

<TR>
<TD>NTU </TD>

<TD>The National Taiwan University </TD>
</TR>

<TR>
<TD>ODeL </TD>

<TD>Open Distance e-Learning </TD>
</TR>

<TR>
<TD>ODL </TD>

<TD>Open Distance Learning </TD>
</TR>

<TR>
<TD>OECD </TD>

<TD>Organisation for Economic Cooperation and Development </TD>
</TR>

<TR>
<TD>OER </TD>

<TD>Open Educational Resources </TD>
</TR>

<TR>
<TD>OPT </TD>

<TD>Open Pre-Trained Transformer </TD>
</TR>

<TR>
<TD>OSRQ </TD>

<TD>Open Short Response Question </TD>
</TR>

<TR>
<TD>Abbreviation </TD>

<TD>Term </TD>
</TR>

<TR>
<TD>PA </TD>

<TD>Predictive Analytics </TD>
</TR>

<TR>
<TD>PAPA </TD>

<TD>Privacy, Accuracy, Property, and Accessibility </TD>
</TR>

<TR>
<TD>PAPAS </TD>

<TD>Privacy, Accuracy, Property, Accessibility, and Security </TD>
</TR>

<TR>
<TD>PBCL </TD>

<TD>Project-Based Collaborative Learning </TD>
</TR>

<TR>
<TD>PBL </TD>

<TD>Project-Based Learning </TD>
</TR>

<TR>
<TD>PEG </TD>

<TD>Project Essay Grade </TD>
</TR>

<TR>
<TD>PI </TD>

<TD>The Philosophy of Information </TD>
</TR>

<TR>
<TD>PLA </TD>

<TD>Predictive Learning Analytics </TD>
</TR>

<TR>
<TD>PLATO </TD>

<TD>Programmed Logic for Automatic Teaching Operations </TD>
</TR>

<TR>
<TD>PPDM </TD>

<TD>Privacy Preserving Data Mining </TD>
</TR>

<TR>
<TD>PRISMA </TD>

<TD>Preferred Reporting Items for Systematic Reviews and Meta-Analyses </TD>
</TR>

<TR>
<TD>Prof </TD>

<TD>Professor </TD>
</TR>

<TR>
<TD>QA </TD>

<TD>Quality Assurance </TD>
</TR>

<TR>
<TD>R&amp;D </TD>

<TD>Research and Development </TD>
</TR>

<TR>
<TD>Rand </TD>

<TD>Research and Development </TD>
</TR>

<TR>
<TD>RERC </TD>

<TD>Research Ethics Review Committee </TD>
</TR>

<TR>
<TD>RFID </TD>

<TD>Radio Frequency Identification </TD>
</TR>

<TR>
<TD>SAKI </TD>

<TD>Self-Adaptive Keyboard Instructor </TD>
</TR>

<TR>
<TD>SDG </TD>

<TD>Sustainable Development Goal </TD>
</TR>

<TR>
<TD>SEE </TD>

<TD>Social, Emotional, and Ethical </TD>
</TR>

<TR>
<TD>SEL </TD>

<TD>Social and Emotional Learning </TD>
</TR>

<TR>
<TD>SEO </TD>

<TD>Search Engine Optimisation </TD>
</TR>

<TR>
<TD>SMART </TD>

<TD>Specific, Measurable, Achievable, Relevant, Time-Bound </TD>
</TR>

<TR>
<TD>SSA </TD>

<TD>Sub-Saharan Africa </TD>
</TR>

<TR>
<TD>STEM </TD>

<TD>Science, Technology, Engineering, and Mathematics </TD>
</TR>

<TR>
<TD>SVM </TD>

<TD>Support Vector Machine </TD>
</TR>

<TR>
<TD>SVVR </TD>

<TD>Scientific Visualisation and Virtual Reality </TD>
</TR>

<TR>
<TD>TA </TD>

<TD>Trustworthy Assessment </TD>
</TR>

<TR>
<TD>TEA </TD>

<TD>Technology Enhanced Assessment </TD>
</TR>

<TR>
<TD>Abbreviation </TD>

<TD>Term </TD>
</TR>

<TR>
<TD>TICCIT </TD>

<TD>Time-Shared Interactive Computer-Controlled Information Television </TD>
</TR>

<TR>
<TD>TVET </TD>

<TD>Technical and Vocational Education and Training </TD>
</TR>

<TR>
<TD>UK </TD>

<TD>United Kingdom </TD>
</TR>

<TR>
<TD>UNESCO </TD>

<TD>United Nations Educational, Scientific, and Cultural Organisation </TD>
</TR>

<TR>
<TD>UNICEF </TD>

<TD>United Nations Children’s Fund </TD>
</TR>

<TR>
<TD>US </TD>

<TD>United States (of America) </TD>
</TR>

<TR>
<TD>VR </TD>

<TD>Virtual Reality </TD>
</TR>

<TR>
<TD>WBE </TD>

<TD>Web-Based Education </TD>
</TR>

<TR>
<TD>WEF </TD>

<TD>World Economic Forum </TD>
</TR>

<TR>
<TD>WGA </TD>

<TD>Writers Guild of America </TD>
</TR>

<TR>
<TD>WHO </TD>

<TD>World Health Organisation </TD>
</TR>

<TR>
<TD>WI </TD>

<TD>Web Intelligence </TD>
</TR>

<TR>
<TD>XAI </TD>

<TD>Explainable Artificial Intelligence </TD>
</TR>

<TR>
<TD>xMOOC </TD>

<TD>Extended Massive Open Online Course </TD>
</TR>

<TR>
<TD>ZB </TD>

<TD>Zettabyte </TD>
</TR>
</Table>
</Sect>

<Sect>
<H2 id="LinkTarget_10752">Foreword </H2>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_4.jpg"/>
</Figure>
</Link>

<Sect>
<H3>When the rhythm of the music changes, the dance step must change also </H3>

<P>Tony J. Mays  </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_5.jpg"/>
</Figure>
</Link>

<Link>
<P>Director: Education Commonwealth of Learning Burnaby, Canada </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_6.jpg"/>
</Figure>
</Link>

<Sect>
<H4>Introduction </H4>

<P>When Prof (Professor) Geesje van den Berg first informed me that they were working on a book about AI in education, I was thrilled, as discussions surrounding technological innovation are often dominated by the discourse of the Global North, meaning that the voices from the developing economies of the Global South are insufficiently represented. When she later invited me to contribute a foreword, I felt both honoured and intrigued to be considered, as there are many individuals undertaking pioneering work in this field who might offer a more unique perspective. However, Prof Van den Berg and I have known each other for many years, having collaborated at UNISA on an interim educator qualification called the National Professional Diploma in Education, and subsequently on a Master’s programme in open and distance learning (ODL), as well as having contributed to the same textbook on Curriculum Studies. Therefore, I was confident that the new publication would be something to eagerly anticipate. </P>

<P>I also have a longstanding interest in the use of technology, particularly, though not exclusively, in the provision of ODL. As noted in a recent keynote presentation for UNISA (Mays 2024a) several years ago, I worked with a large group of out-of-school youths aged around 23 who had completed their matriculation but were unable to pass a standard industry mathematics test required for entry-level employment opportunities. We needed a responsive online platform to motivate these learners to reengage with mathematics at approximately a Grade 6 level. We opted for Khan Academy, which was already utilising a form of AI (artificial intelligence). Students worked through activities and earned badges for motivation. A dashboard displayed their progress, showing how each small achievement contributed to completing the overall programme. Within minutes, each student was following a slightly different trajectory through the subject content in a manner that would have been difficult for a lone educator in a physical classroom using printed textbooks to replicate (there were more than 100 students in the group). Furthermore, the backend tutor functionality allowed me, as their chosen tutor, to monitor their progress and provide additional personalised feedback as needed. Most of the time, only a small percentage of students required this personalised human engagement. I was very impressed by how the technology enabled me to offer more tailored support. </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_7.jpg"/>
</Figure>
</Link>

<P>Now that I am Director of the Education Sector at the COL (Commonwealth of Learning), that interest in technology continues. I believe that the COL has a key role to play in supporting the development and dissemination of useful knowledge in an open way to address practical, theoretical, and policy issues in the use of technology in education provision generally and with the evolving possibilities of Gen-AI (generative artificial intelligence) in particular. For example, as reported in our news stories, the COL recently collaborated with the National University of Samoa to use Gen-AI to develop a virtual tutor, able to answer most recurring questions about the Moodle platform being used (COL 2023). Subsequently, the COL worked with the University of the South Pacific to successfully enhance its Semester Zero induction programme, using similar technology to address both learning management systems and content queries (COL 2024). Learning from these experiences, COL recently inaugurated a collaborative project called ‘Teacher-in-the-Loop,’ in which several of our initiatives work together to use Gen-AI to help educators develop OER (open educational resources) for STEM (Science, Technology, Engineering, and Mathematics) and TVET (technical and vocational education and training) subjects, which is not only scientifically accurate but also actively seeks to pre-empt any possible biases, particularly in terms of gender equity. A key proposition of this ongoing project is that although Gen-AI is the tool, the educator makes the final decision on what to use and how. </P>

<P>The COL also recently published the results of a survey indicating that more support was needed for policy development in relation to AI deployment (Paskevicius 2024) – a need which was confirmed by other research (Linderoth, Hultén, &amp; Stenliden 2024; Ratten &amp; Jones 2023) – and followed up on the findings of this report by developing policy guidelines (Mohamed &amp; Mishra 2024). So, our interests are aligned. </P>

<P>In the balance of this foreword, I want to reflect a little on the notion of human intelligence, the evolving field of AI, and some of the practical implications for teaching and learning, and then use these reflections as lenses for my initial engagement with this exciting new publication. </P>
</Sect>

<Sect>
<H4>Reflection on Intelligence </H4>

<P>The African proverb that heads this foreword offers a pragmatic insight into how to deal with change. This seems like a good starting point because, at least to my mind, change seems central to the concept of intelligence, whether reactive or proactive. Moreover, a publication focused on ‘artificial’ intelligence presupposes a contrast with ‘non-artificial,’ presumably human intelligence. Sternberg (2024), writing for the Encyclopedia Britannica, offers the following definition: ‘[H]uman intelligence, mental quality that consists of the abilities to learn from experience, adapt to new situations, understand and handle abstract concepts, and use knowledge to manipulate one’s environment.’ </P>

<P>Sternberg notes that there are various theories about the nature of human intelligence and identifies the following core groups, among others: Psychometric, cognitive, cognitive-contextual, biological, and hemispheric. He also observes that our intelligence/s may change over time. We do not have space to explore all these perspectives here, but it seems useful to include a short reflection to help explain the complexity. Human beings access information and experiences through their senses, so it is important for us to ask questions like, ‘Did I really see what I thought I saw?,’ ‘Did I really hear what I thought I heard?’ and so on. Once we have clarity on these questions, we tend to have an emotional response, for example, ‘I don’t like that, so I won’t engage further,’ while a more intelligent response might be, ‘I don’t like that; why do I not like it? What can I learn by trying to understand my dislike?’ </P>

<P>In a similar vein, human beings live in a cultural context and might react to information or an experience by not wanting to engage further because what is said or done and by whom run counter to their own cultural norms. However, they might learn something by consciously examining their own norms and comparing them with those of others. Once we have passed through these initial filters, we might bring to bear our reasoning and logic to examine the information or experience in a critical way to understand it better, and then once we have a better understanding, we will likely evaluate that understanding from the perspective of our own worldview. The point being made is that human beings engage with the world in a multi-faceted way. They also make many daily decisions which are not necessarily based on logic or on an analysis of huge datasets and, in fact, human beings often infer meaning and make decisions based on very limited data. Moreover, human beings make connections between data, ideas, and phenomena that are sometimes not obviously connected at all and often also disparate in form. In addition, we have a strong propensity to imagine that which does not exist, or at least not yet in some cases; so authors like Jules Verne, Isaac Asimov, and Arthur Clarke were able to write novels about journeys to the bottom of the sea or to the moon, and about robots and earth satellites, even before such things became possible technologically. Human intelligence (or intelligences) is therefore a complex field, but is the key counterpoint to the current focus on AI. </P>
</Sect>

<Sect>
<H4>The evolving field of Artificial Intelligence </H4>

<P>Oxford Languages (2024) defines AI as ‘the theory and development of computer systems able to perform tasks that normally require human intelligence, such as visual perception, speech recognition, decision-making, and translation between languages.’ Although the concept of non-human intelligence can be found in ancient mythology and in a sense, began to seem practicable near the start of the industrial era with the invention of machines like Babbage’s difference engine, Scott (2024) observes that what we now refer to as AI has been around for a long time even in the modern era, with a focus on ‘programmed learning’ in the 1960s and 1970s, which advanced in the 1980s and 1990s into basic personalisation, the development of more adaptive virtual systems in the 2000s, and the more recent LLMs (large language models) revolution which has paved the way for virtual tutoring, chatbots for teletutoring, personalised learning content, automated grading, and automated administrative tasks. As with any new technology, there are potential opportunities, potential risks, and the need for experimentation and discourse (Baidoo-Anu &amp; Owusu Ansah 2023). We also need to learn how best to use the evolving affordances of the technology, including gaining a nuanced understanding of its strengths and weaknesses in different contexts (Bozkurt &amp; Sharma 2023; Cooper 2023; Du, Sun, Jiang, Atiquil Islam, &amp; Gu 2024; Smolansky, Cram, Raduescu, Zeivots, Huber, &amp; Kizilcec 2023). </P>

<P>As Denny, Gulwani, Heffernan, Käser, Moore, Rafferty, and Singla (2024) observe, the appropriate use of AI in an education context requires technical expertise, experience of working with students, and knowing how to deploy technology at scale. It will impact how we teach, design a curriculum, manage assessment, work with OER, and manage tensions such as between homogenisation, creativity, and the personalised/individualised. It will allow personalised, real-time feedback and create more time for human facilitation, but it also raises concerns about data privacy, algorithmic bias, academic integrity, and the need for substantially more educator training (Eke 2023; Nikolic, Daniel, Haque, Belkina, Hassan, &amp; Grundy 2023; OER Africa 2023; Onesi-Ozigagun, Ololade, Eyo-Udo, &amp; Ogundipe 2024). </P>

<P>In the most recent (7th) edition of their research on AI, Perrault and Clark (2024) conclude that AI beats humans on some tasks, but not all, that it can make workers more productive and lead to higher quality work, and also that it can accelerate scientific progress. However, they note the lack of robust and standardised evaluations for LLM responsibility and observe that as people become more aware of what AI can do, they also become more concerned about the implications. This would include concerns related to ethics, curriculum design, continuous learning, and industry alignment, among others (Abulibdeh, Zaidan, &amp; Abulibdeh 2024). </P>

<P>Williamson (2024) observes that while we cannot anticipate all future risks, we know from our past experience that the deployment of new technologies is not a linear process. We therefore need an active discourse between the technology developers on the one hand and educators on the other to preempt any unintended consequences. </P>

<P>Overall, this suggests the need for a pragmatic response to use human intelligence and human-made intelligence in a synergistic way. However, some things already seem obvious to implement: We can automate routine administration and teaching tasks; manage a balance between personalised support and over-dependence; support students and educators to use and critique AI responses; focus on more authentic assessment; address cybersecurity and academic integrity issues; make ethical use of analytics; provide ongoing professional development for educators; and be open to trying new things in a responsible and ethical way (Bates 2024; Du et al. 2024; Kizilcec 2024; Law 2024; Sevnarayan &amp; Potter 2024; Williamson 2024). </P>
</Sect>

<Sect>
<H4>Implications for Practice </H4>

<P>As noted in another discussion (Mays 2024b), we need to explore the link between pedagogy and technology and the implications for educators’ practice. Having established a framework, we can then explore the possible disruptive influence of AI. We also need to revisit assessment because if we change our assessment, then other things will fall into place (Rowntree 1987). In this context, we should think of pedagogy as an overarching term meaning the art and science of teaching, realising that it needs to be nuanced for different populations and contexts – for example, for adults (andragogy), for self-directed learning in ODL (heutagogy), and for appropriateness for cultural context (e.g., ubuntugogy from southern Africa). Overall, we should promote the notion of ‘open’ pedagogy (Bates 2022; Hegarty 2015). </P>

<P>We have no shortage of theories about how people learn, as the interactive map created by Millwood (2021) demonstrates. Despite this diversity of learning theories, we can identify at least four broad approaches: Behaviourist/instructivist, cognitive/ constructivist, socio-constructivist, and connectivist. These should not be regarded as mutually exclusive positions – our practice likely draws on all four to a lesser or greater extent at different times. Depending on our perspective, we will then use a particular technology in a different way, for example, the use of instructivist lecture-type videos in xMOOCs (extended massive open online courses) as opposed to the use of videos in cMOOCs (‘connectivist’ or ‘constructivist’ massive open online courses) where the video content will likely be scaffolded and presented differently but be augmented by additional applications like discussion forums. How we engage with AI or Gen-AI will similarly be influenced by what we believe about how learning happens. </P>

<P>Given the continuing challenges of the digital divide, we need to adopt a pragmatic approach to the use of technology. In many countries we still need to make provision for content that can be printed and/or shared digitally to a basic device at a Wi-Fi-enabled centre, especially for the more rural areas. We can, however, enrich the core content with, for example, video and audio content – whether using broadcast technology or embedded in an open textbook for students with higher end devices but limited access to the internet. We might then also need a version that is fully online, exploiting H5P (HTML5 package) for interactivity and including online discussion fora, which could have both synchronous and non-synchronous elements, as well as involving social media. As noted in the recently published GEM (global education monitoring) report for the Pacific (COL &amp; UNESCO 2024), despite challenging contexts, we can and often do adapt technologies to find solutions that are more fit for context – AI and Gen-AI are no exception. </P>

<P>Once we know what we want to teach, why, and how, there is no shortage of technologies to choose from. The latest version of the ‘padagogy’ wheel, recently updated to include AI (Carrington 2024), provides some examples. However, we should remember the creator’s earlier observation that it is not fundamentally about the technology but the pedagogy (Carrington 2015). Bates (n.d.) offers a useful heuristic SECTIONS in this regard. </P>

<P>Stoker (2024), a LINC (Language Instruction for Newcomers to Canada) instructor for the ISS (Immigrant Services Society) of British Columbia, regularly uses Chat GPT (generative pre-trained transformer) in his work. He notes that it helps him to reduce the time needed for planning, assessment design, differentiation, and analysis of what is working in his presentations and what not, so that changes can be made before a topic is taught again and that over time, working with the application, it has learned what he likes and makes increasingly useful recommendations. Stoker also observes that AI has been used to support learning in multiple contexts ranging from Georgia State University, which in 2013 introduced AI to identify and automate feedback to at-risk students, linking them directly with a human tutor where indicated, to schools in rural India which developed AI tools for tablets to provide feedback to students on their subject-based learning. In both cases, these interventions resulted in improved retention and success. He notes that Singapore even experimented with humanoid robots to provide support for students whose educators were on sick leave. He notes that AI in the classroom can take care of many routine tasks freeing the educator to spend more time interacting directly with their students. </P>

<P>As with all new technologies, AI presents both opportunities and challenges. We should not, perhaps, succumb to the hype of the early adopters, but at the same time, we should guard against being overly conservative and deny our educators and students the opportunity to exploit some of the more obvious early benefits. This publication can help us make more informed choices. </P>
</Sect>

<Sect>
<H4>Overview of the Chapters in this Publication </H4>

<P>This publication is divided into two volumes. In Volume 1, Chapter 1, Garth Aziz discusses the potential use of AI in pedagogies employed in the humanities. The author notes that educators are responsible for preparing students to succeed in an AI-mediated world, but this should not be limited to a pragmatic preparation for employment but should rather foster a pedagogy of care, ethics, and good judgement, while retaining a sense of exploration and wonder. In Chapter 2, Lorette Jacobs and Karin McGuirk explore the changing roles of information professionals in a HE (higher education) sector increasingly using AI, noting the increasingly central importance of this role as part of a wider professional community of practice. In Chapter 3, Erna Oliver identifies the need to work more intelligently and responsibly with AI to foster creative innovation. </P>

<P>In Chapter 4, Johannes Cronjé explores an integrated, process-based framework for writing with AI, but finally suggests that from a posthuman perspective, the focus should not be on the student but on the task. In Chapter 5, Karen Ferreira-Meyers examines the potential role of AI in the instructional design process. The author observes that while AI presents an immense potential for transforming HE, carefully considering ethical, social, and pedagogical dimensions is essential. The author argues the need to foreground cooperation, wisdom, and responsibility as educational values no less important than efficiency or personalisation, and the need to harmonise automation with our deepest humanity. In Chapter 6, Lilia Cheniti-Belcadhi, Mohamed Mitwally, and Asma Hadyaoui explore intelligent frameworks for assessment in AI-enhanced learning environments. They observe that while AI has the potential to greatly improve the accuracy, customisation, and efficiency of assessments, leading to a more adaptable, inclusive, and effective era in education, there are still several concerns that need to be addressed. For example, they suggest that AI assimilation and the processing of educational data pose major dangers to the privacy and integrity of such sensitive information, making student data privacy a critical issue. The authors further suggest that an effective resolution of ethical concerns requires collaboration among professionals from diverse fields such as technology, education, ethics, and policymaking. </P>

<P>In Chapter 7, Elmarie Kritzinger and Sarah Jane Johnston present a multi-dimensional approach to a digital transformation process in HE. They explore the interplay between key stakeholders, AI drivers, and a systematic process of commitment, strategy, testing, implementation, and evaluation. In Chapter 8, Hiro Saito argues the need to look beyond the economic and work-related potential of AI to consider how AI might be utilised to augment humanity itself in service of greater equity, solidarity, freedom, and inclusiveness, and to accelerate learning and flourishing toward collective wellbeing. </P>

<P>In Chapter 9, Joseph Evans Agolla and Phineas Sebopelo observe that many claims currently being made about the potential of AI are not grounded in empirical evidence and that in the African context, it is necessary to find a balance between various competing interests concerning other developments and the implementation of AI tools in education systems. In Chapter 10, in a related discussion, Phineas Sebopelo and Joseph Evans Agolla then argue that AI technology will generate opportunities for accelerating transformation in education. ODL institutions should therefore strategise on how to leverage AI and new technologies to enhance efficiency, encourage learning, and foster creativity while ensuring fairness and equity. They suggest using AI to reshape students’ experiences, enhance assessment, streamline administrative tasks, and personalise learning. </P>

<P>In Volume 2, Chapter 1, Samuel Amponsah, Micheal van Wyk, and Michael Adarkwah call for a more holistic and adaptive approach to academic integrity in the digital age. They suggest that integrating GPT detectors and technological tools should be complemented by a commitment to addressing the root causes of academic anxiety. By fostering a culture of trust, transparency, and resilience, they suggest that we can navigate these challenges successfully, ensuring that the pursuit of knowledge remains a transformative and enriching experience for students. </P>

<P>In Chapter 2, Nicky Tjano argues that there is a need to create an educational future that is both technologically advanced and morally sound. This involves tackling issues presented by concern about mounting students’ dependency on AI, reduced human interaction, communication, and ethical gaps. The author suggests that the journey to maintaining academic integrity in the AI age requires ongoing self-examination, adjustment, and a resolute dedication to the values that constitute the core of intellectual endeavours. </P>

<P>In Chapter 3, Faiza Gani explores the link between AI and QA (quality assurance) in HE. The author notes that the opportunities presented by AI link to QA factors and can improve these factors. On the other hand, there is also a link between the challenges provided and QA factors. Consequently, for QA factors not to be compromised, the challenges presented by AI must be carefully mitigated. </P>

<P>In Chapter 4, Brenda van Wyk and Marlene Holmner trace developments in information ethics through history. They suggest that we are currently at a critical point where the swift progress in AI technology necessitates a proactive and deliberate approach to ethical considerations. They also suggest that the OECD (Organisation for Economic Cooperation and Development) FIP (fair information practice) standards serve as a fundamental framework for safeguarding data and ensuring privacy, which are of utmost importance in the era of AI. These principles, in conjunction with the guidelines for responsible AI, they argue, provide a clear path for the ethical, transparent, fair, and inclusive development of AI. </P>

<P>In Chapter 5, Nicky Tjano unpacks the role of big data, AI, and predictive analytics in education, suggesting that creating hybrid mechanisms for review processes will assist in mitigating risks. He further adds that this can be augmented by introducing new specialised committees to ensure that unfamiliar or too technical research projects related to AI and big data are properly assessed for ethical compliance. These changes mean that educators, research review committees, and educational institutions need to relook at their governance processes, protocols, and policy frameworks. </P>

<P>In Chapter 6, Geesje van den Berg suggests that as AI tools will only get better and more human-like, so HE will have to adapt accordingly. Although Gen-AI has the potential to improve teaching and learning for everyone, the key is finding a balance and using Gen-AI to enhance, rather than replace, traditional educational practices. The author argues that collaboration among lecturers, researchers, policymakers, and students will then be essential to ensure that AI is used ethically and responsibly in education. The goal is to create a more equitable and effective education system that provides students with personalised teaching, feedback, and support by addressing the challenges posed by AI technologies and leveraging their benefits. </P>

<P>In Chapter 7, Geesje van den Berg and Patience Mudau explore the potential benefits and challenges that AI presents from the perspective of educators. They note that from their research, the most prominent benefits identified were that these digital tools can provide individualised, 24/7 guidance and support, and also augment learning content. They also note that these tools can assist with simplifying language, a unique benefit in multilingual contexts. Furthermore, they argue that Gen-AI tools provide opportunities for active involvement and that they can save time and make information and knowledge accessible. However, they note concerns that the technologies could provide biased information, lead to decreased human interaction and lack emotional intelligence. Additionally, these tools could provide inaccurate information, and there is a risk that students may over-rely on them. They have further identified the risks of plagiarism and data privacy and security as challenges. The guidelines they propose focus on the need for policies and AI literacy training, setting an example and providing guidelines to students using Gen-AI. Another important point is that the Gen-AI tools should complement, and not replace, the human elements of teaching and learning, ensuring a holistic and inclusive educational approach. </P>

<P>In Chapter 8, Elize du Plessis argues the need to reimagine online assessment methods observing that Gen-AI holds immense potential to revolutionise how we evaluate knowledge and skills. From adaptive assessments to automated grading and feedback, these models can enhance student and educator assessment experiences. However, careful attention must be given to addressing challenges such as bias, reliability, security, and privacy. Collaboration between researchers, educators, and policymakers is necessary to leverage the benefits of LLMs while upholding ethical standards and ensuring the fairness and validity of online assessments. She further argues that academics should investigate Gen-AI, identify its limitations, evaluate its potential applications within the relevant disciplines or teaching contexts, and engage in discussions about all these aspects with students, who are likely already familiar with the technology. </P>

<P>In Chapter 9, Micheal van Wyk explores the issue of academic dishonesty and how it might be prevented. The author suggests that academics must redesign authentic context-based assessment tasks, such as specific case studies, problem-solving activities, project-based learning tasks, and academic writing skills. The research report suggests that education faculties need to update AI policies, provide guidelines, and educate students about AI literacy skills and AI detector tools for the ethical use of Gen-AI tools in teaching and learning. The author argues that this must include developing students’ ability to use AI in appropriate ways. </P>

<P>The nineteen chapters in this two-volume publication offer insights into both challenges and opportunities for working with AI in general and Gen-AI in particular. Overall, the argument that emerges is for a reasoned engagement with the evolving possibilities of the new technology, but as Mark Twain is credited with observing, gradual improvement is probably going to be more useful than delayed perfection. </P>

<P>References </P>

<P>Abulibdeh, A., Zaidan, E., &amp; Abulibdeh, R. 2024. Navigating the </P>

<P>confluence of artificial intelligence and education for </P>

<P>sustainable development in the era of industry 4.0: Challenges, opportunities, and ethical dimensions. Journal of Cleaner Production
<Link> 437(1). 140527. https://doi.org/10.1016/j. </Link>

<Link>jclepro.2023.140527 </Link>
</P>

<P>Baidoo-Anu, D. &amp; Owusu Ansah, L. 2023. Education in the era of generative artificial intelligence (AI): Understanding the potential benefits of ChatGPT in promoting teaching and learning. Journal of AI 7(1):52-62. https://doi.org/10.2139/ ssrn.4337484 </P>

<P>Bates, AW. n.d. 12. Using SECTIONS to select digital tools. Pressbooks. 
<Link>Available at: https://pressbooks.pub/everydayid/chapter/using</Link>

<Link>sections-to-select-digital-tools/</Link>
. (Accessed on 13 December 2024). </P>

<P>Bates, AW. 2022. Teaching in a digital age: Guidelines for designing teaching and learning. 3rd
<Link> ed. Available at: https://pressbooks.bccampus. </Link>

<Link>ca/teachinginadigitalagev3m/</Link>
. (Accessed on 11 December 2024). </P>

<P>Bates, AW. 2024. What should universities do about AI for teaching and learning? Online Learning and Distance Education Resources. 
<Link>26 April 2024. Available at: https://www.tonybates. </Link>

<Link>ca/2024/04/26/what-should-universities-do-about-ai-for</Link>

<Link>teaching-and-learning/</Link>
. (Accessed on 10 December 2024). </P>

<P>Bozkurt, A. &amp; Sharma, RC. 2023. Generative AI and prompt engineering: The art of whispering to let the genie out of the algorithmic world. Asian Journal of Distance Education 18(2):i-vi. </P>

<P>Carrington, A. 2015. The padagogy wheel: It’s not about the apps, it’s 
<Link>about the pedagogy. Available at: https://www.teachthought. </Link>

<Link>com/technology/the-padagogy-wheel/</Link>
. (Accessed on 1 December 2024). </P>

<P>Carrington, A. 2024. The padagogy wheel, ENG AI V7.0. Available at: 
<Link>https://designingoutcomes.com/assets/PadWheelV7/PW_ENG_ </Link>

<Link>AI_V7.0.pdf</Link>
. (Accessed on 1 December 2024). </P>

<P>COL (Commonwealth of Learning). 2023. Samoa pioneers AI-powered learner support. Commonwealth of Learning. 21 August 2023. 
<Link>Available at: https://www.col.org/news/samoa-pioneers-ai</Link>

<Link>powered-learner-support/</Link>
. (Accessed on 14 December 2024). </P>

<P>COL (Commonwealth of Learning). 2024. USP enhanced its Semester Zero programme with GPT-powered AI support. Commonwealth of Learning
<Link>. 22 April 2024. Available at: https://www.col.org/ </Link>

<Link>news/usp-enhanced-its-semester-zero-programme-with</Link>

<Link>gpt-powered-ai-support/</Link>
. (Accessed on 14 December 2024). </P>

<P>COL (Commonwealth of Learning) &amp; UNESCO (United Nations </P>

<P>Educational, Scientific, and Cultural Organisation). 2024. UNESCO. Global education monitoring report 2024. Pacific: Technology in education: A tool on whose terms? Available </P>

<P>at: 
<Link>https://unesdoc.unesco.org/ark:/48223/pf0000391211</Link>
. (Accessed on 2 December 2024). </P>

<P>Cooper, G. 2023. Examining science education in ChatGPT: An exploratory study of generative artificial intelligence. Journal of Science Education and Technology 32:444
<Link>452. https://doi.org/10.1007/s10956-023-10039-y </Link>
</P>

<P>Denny, P., Gulwani, S., Heffernan, NT., Käser, T., Moore, S., Rafferty, AN., &amp; Singla, A. 2024. Generative AI for Education (GAIED): Advances, opportunities, and challenges. Arxiv. 10 pages. Available at: 
<Link>https://arxiv.org/pdf/2402.01580</Link>
. (Accessed on 5 December 2024). </P>

<P>Du, H., Sun, Y., Jiang, H., Atiquil Islam, AYM., &amp; Gu, X. 2024. Exploring the effects of AI literacy in teacher learning: An empirical study. Humanities and Social Sciences Communications 11. Article 
<Link>559. 10 pages. https://doi.org/10.1057/s41599-024-03101-6 </Link>
</P>

<P>Eke, DO. 2023. ChatGPT and the rise of generative AI: Threat to </P>

<P>academic integrity? Journal of Responsible Technology 13. 100060. 
<Link>4 pages. https://doi.org/10.1016/j.jrt.2023.100060 </Link>
</P>

<P>Hegarty, B. 2015. Attributes of open pedagogy: A model for using open educational resources. Educational Technology July-August:3-13. </P>

<P>Kizilcec, RF. 2024. To advance AI use in education, focus on understanding educators. International Journal of Artificial Intelligence in Education 
<Link>34:12-19. https://doi.org/10.1007/ </Link>

<Link>s40593-023-00351-4 </Link>
</P>

<P>Law, L. 2024. Application of generative artificial intelligence (GenAI) in language teaching and learning: A scoping literature review. Computers and Education Open 
<Link>6. 13 pages. https://doi. </Link>

<Link>org/10.1016/j.caeo.2024.100174 </Link>
</P>

<P>Linderoth, C., Hultén, M., &amp; Stenliden, L. 2024. Competing visions of artificial intelligence in education – a heuristic analysis on sociotechnical imaginaries and problematizations in policy guidelines. Policy Futures in Education 
<Link>22(8):1662-1678. https:// </Link>

<Link>doi.org/10.1177/14782103241228900 </Link>
</P>

<P>Mays, T. 2024a. Rooting for robots through pragmatic pedagogy. Keynote address to the UNISA International Conference on Teaching and Learning, Pretoria, South Africa on 7 May 2024. 
<Link>Available at: http://hdl.handle.net/11599/5599. (Accessed on 12 </Link>
December 2024). </P>

<P>Mays, T. 2024b. Pedagogy 4.0: Technology enhanced teaching and learning. Keynote address to the Teaching with 21st Century Technology Conference at the University of the West Indies, Moana, Jamaica, 28 November 2024. Available at: 
<Link>http://hdl. </Link>

<Link>handle.net/11599/5587</Link>
. (Accessed on 12 December 2024). </P>

<P>Millwood, R. 2021. Learning theory. HoTEL (holistic approach to 
<Link>technology enhanced learning). Available at: https:// </Link>

<Link>cmapspublic3.ihmc.us/rid=1WRDG1P8T-5LW948-1MKQ</Link>
. (Accessed on 6 December 2024). </P>

<P>Mohamed, A. &amp; Mishra, S. 2024. Developing policy guidelines for artificial intelligence in post-secondary institutions. COL 
<Link>(Commonwealth of Learning). Available at: http://hdl.handle. </Link>

<Link>net/11599/5615</Link>
. (Accessed on 6 December 2024). </P>

<P>Nikolic, S., Daniel, S., Haque, R., Belkina, M., Hassan, GM., &amp; Grundy, </P>

<P>S. 2023. ChatGPT versus engineering education assessment: </P>

<P>A multidisciplinary and multi-institutional benchmarking </P>

<P>and analysis of this generative artificial intelligence tool to </P>

<P>investigate assessment integrity. European Journal of Engineering Education 
<Link>48(4):559-614. https://doi.org/10.1080/03043797.20 </Link>

<Link>23.2213169 </Link>
</P>

<P>OER (open educational resources) Africa. 2023. Three ways artificial intelligence could change how we use open educational 
<Link>resources. Blog. Available at: https://www.oerafrica.org/ </Link>

<Link>content/three-ways-artificial-intelligence-could-change</Link>

<Link>how-we-use-open-educational-resources</Link>
. (Accessed on 7 December 2024). </P>

<P>Onesi-Ozigagun, O., Ololade, YJ., Eyo-Udo, NL., &amp; Ogundipe, DO. </P>

<P>2024. Revolutionizing education through AI: A comprehensive </P>

<P>review of enhancing learning experiences. International Journal of Applied Research in Social Sciences
<Link> 6(4):589-607. https://doi. </Link>

<Link>org/10.51594/ijarss.v6i4.1011 </Link>
</P>

<P>Oxford Languages. 2024. Artificial intelligence – definition. Available at: 
<Link>https://languages.oup.com/google-dictionary-en</Link>
. (Accessed on 7 December 2024). </P>

<P>Paskevicius, M. 2024. Policy and practice of artificial intelligence in teaching and learning at post-secondary educational institutions in the Commonwealth. COL (Commonwealth of 
<Link>Learning). Available at: http://hdl.handle.net/11599/5605. </Link>
(Accessed on 2 December 2024). </P>

<P>Perrault, R. &amp; Clark, J. 2024. Artificial Intelligence Index Report 2024. </P>

<P>Stanford University: Human-Centered Artificial Intelligence. </P>

<P>Ratten, V. &amp; Jones, P. 2023. Generative artificial intelligence (ChatGPT): </P>

<P>Implications for management educators. The International Journal of Management Education
<Link> 21(3). 7 pages. https://doi. </Link>

<Link>org/10.1016/j.ijme.2023.100857 </Link>
</P>

<P>Rowntree, D. 1987. Assessing students: How shall we know them? 2nd ed. London: Kogan. </P>

<P>Scott, P. 2024. AI in open education. Virtual keynote address on 24 September 2024 at University of Ghana Conference on the theme ‘Embracing the future: The emergence of AI and its impact on open distance and e-learning.’ COL (Commonwealth 
<Link>of Learning). Available at: http://hdl.handle.net/11599/5663. </Link>
(Accessed on 23 December 2024). </P>

<P>Smolansky, A., Cram, A., Raduescu, C., Zeivots, S., Huber, E., &amp; Kizilcec, RF. 2023. Educator and student perspectives on the impact of generative AI on assessments in higher education. L@S ’23: Proceedings of the Tenth ACM Conference on Learning @ Scale, July 2023. 
<Link>https://doi.org/10.1145/3573051.3596191 </Link>
</P>

<P>Sevnarayan, K. &amp; Potter, M-A. 2024. Generative artificial intelligence in distance education: Transformations, challenges, and impact on academic integrity and student voice. Journal of Applied Learning &amp; Teaching
<Link> 7(1):104-114. https://doi.org/10.37074/ </Link>

<Link>jalt.2024.7.1.41 </Link>
</P>

<P>Sternberg, RJ. 2024. Human intelligence. 
<Link>Britannica.com</Link>
. Available at: 
<Link>https://www.britannica.com/science/human-intelligence</Link>

<Link>psychology</Link>
. (Accessed on 17 December 2024). </P>

<P>Stoker, M. 2024. Collaborating with AI to create a collaborative classroom. Presentation CCBYSA, Personal Communication. YouTube
<Link>. Available at: https://www.youtube.com/ </Link>

<Link>watch?app=desktop&amp;v=Fbr6RMOxyt0</Link>
. (Accessed on 12 December 2024). </P>

<P>Williamson, B. 2024. The social life of AI in education. International Journal of Artificial Intelligence in Education 
<Link>34:97-104. https:// </Link>

<Link>doi.org/10.1007/s40593-023-00342-5 </Link>
</P>
</Sect>
</Sect>
</Sect>

<Sect>
<H2 id="LinkTarget_10753">Section 1 </H2>

<Sect>
<H3>Ethics, Integrity, and AI in Higher Education: Navigating Challenges and Shaping Futures </H3>

<Link><Figure id="LinkTarget_10754">

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_8.jpg"/>
</Figure>
</Link>
</Sect>
</Sect>

<Sect>
<H2>Chapter 1 </H2>

<P>Navigating Academic Anxiety and </P>

<P>Fostering Integrity in the Age of </P>

<Sect>
<H3>GPT Detectors </H3>

<P>Samuel Amponsah </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_9.jpg"/>
</Figure>
</Link>

<P>Department of Distance Education University of Ghana Accra, Ghana </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_10.jpg"/>
</Figure>
</Link>

<P>Micheal M van Wyk </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_11.jpg"/>
</Figure>
</Link>

<Link>
<P>Department of Education University of South Africa Pretoria, South Africa </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_12.jpg"/>
</Figure>
</Link>

<P>Michael A Adarkwah </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_13.jpg"/>
</Figure>
</Link>

<P>Institute for Education and Culture Friedrich Schiller University Jena Jena, Germany </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_14.jpg"/>
</Figure>
</Link>

<Sect>
<H4>Introduction </H4>

<P>Academic integrity is a fundamental principle of education and serves as the bedrock upon which the integrity of knowledge and scholarship rests (Bretag, Harper, Burton, Ellis, Newton, Rozenberg, Saddiqui, &amp; Van Haeringen 2019:1849). In today’s digital age, where the boundless expanse of information is merely a click away and educational landscapes have gravitated towards online platforms, the challenges to maintaining academic integrity have metamorphosed (Eaton &amp; Gysbers 2021:48). As asserted by Kumar (2020:133), students’ encounters with academic anxiety have surged, complicating the educational landscape even further. </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_15.jpg"/>
</Figure>
</Link>

<P>Against this backdrop, the emergence of GPT (generative pre-trained transformer) detectors beckons both promise and disquiet among major stakeholders in education, especially the academy. These advanced AI (artificial intelligence) tools have been meticulously designed to unearth and thwart plagiarism and cheating. They also have the potential to reconfigure the very fabric of academic integrity which has aroused questions regarding the place of critical thinking among students and the current generation. However, the debate on academic malfeasance seems to have no end in sight from the human perspective, as GPT detectors are also casting shadows of privacy infringement, fairness disparities, and the omnipresent question of technology’s role in modern education (Jones &amp; Lee 2023:15). </P>

<P>By way of definition, academic integrity embodies the pledge to honesty, originality, and ethical conduct in all facets of the learning journey (Bretag et al. 2019:1851). Safeguarding academic integrity upholds a culture of trust and respect within the academic community, ensuring that the rewards – grades and degrees – echo genuine understanding and competence. Nevertheless, the digital era has ushered in a new era for academic integrity, one riddled with pitfalls and challenges (Eaton &amp; Gysbers 2021:47). In addition, the internet’s vast sea of knowledge and information at the disposal of students presents them with the burgeoning temptation to plagiarise or engage in acts of dishonesty at every point in time. In response to these evolving circumstances, IHEs (institutions of higher education) have found themselves at a crossroads over the past few years. At the same time, this has compelled many IHEs to devise innovative strategies to preserve the sanctity of learning and assessments. According to Steele (2023:5), the internet is filled with falsehood, fake information, and no shortage of misinformation. </P>

<P>We concur that in a world where everything can be faked, it is practical to identify real truth. Important questions such as the following beg answers: ‘Who did quality assurance on programmes or products?’ and ‘Who has done the quality assurance (checks and balances) of the content generated in the tech companies?’ Moreover, several studies have reported the dangers of chatbots (Eaton &amp; Gysbers 2021:57; Liang, Yuksekgonul, Mao, Wu, &amp; Zou 2023:1 of 9). Unless prompted otherwise, GPT detectors can potentially threaten entire academic projects. Moreover, while the digital age has revolutionised how we access and disseminate information, it has engendered a pandemic of academic anxiety among students (Kumar 2020:139). Contemporary students find themselves caught in the crossfire of mounting academic pressure and elevated expectations for success. The relentless competition facilitated by digital platforms and the omnipresence of educational metrics magnify this pressure. </P>

<P>As students grapple with the looming spectre of failure or falling short of academic standards, stress and anxiety become their companions. Regrettably, some resort to unethical practices to stave off these anxieties and safeguard their academic standing. This dates back to the days when students would plagiarise large doses of materials into their projects or assignments without due recourse to the original owners of the work or the consequences when they were caught out engaging in such unethical behaviours. It is, however, worth noting that academic worries, when left unchecked, can exert a profound toll on students’ mental wellbeing and their capacity to engage in critical thinking processes that define genuine learning experiences and authentic assessments. </P>

<P>In response to the multifaceted challenges presented by the digital age, a collaborative alliance between IHEs and technology companies has given birth to innovative solutions (Smith &amp; Smith 2022:178). Among these solutions, GPT detectors, powered by the advanced capabilities of natural language processing and machine learning algorithms have been developed to address issues of academic misconduct. Their primary mission lies in the identification of instances of plagiarism, cheating, and other forms of academic dishonesty (Jones &amp; Lee 2023:17). By meticulously scrutinising written assignments, essays, and academic works, they search for any signs of unoriginal content or improperly cited sources. Through the harnessing of AI, GPT detectors are believed to furnish a formidable defence against academic misconduct, serving as the vanguards of educational sanctity. </P>

<P>Considering the preceding, GPT detectors undertake a comprehensive examination by comparing the work of students with extensive databases of academic and non-academic content (Smith &amp; Smith 2022:179). Their keen algorithms detect similarities and anomalies that may suggest plagiarism or unauthorised collaboration. Upon the discovery of such discrepancies, the detectors swiftly alert instructors or academic integrity committees, who in turn, are empowered to take the requisite measures (Jones &amp; Lee 2023:19). By automating this process, GPT detectors unburden educators from the onerous task of policing academic dishonesty, thus granting academics the ability to channel their energies into the noble pursuit of teaching and nurturing critical thinking among their students. </P>

<P>Despite the promise they hold, GPT detectors introduce a trove of ethical and privacy concerns (Bretag et al. 2019:1843). The implementation of these technologies invariably necessitates the intrusive surveillance of students’ academic work and online activities (Eaton &amp; Gysbers 2021:59). There have been instances where GPTs have relayed detected plagiarism in students’ work which proved otherwise upon further probe. The lack of understanding of the usage of such tools among educators also cultivates erroneous judgements on students’ work at times. It is for such reasons that some critics argue that this heightened surveillance infringes upon students’ privacy rights and has the potential to stifle open communication and free expression in educational settings (Kumar 2020:134). As a result, striking the delicate balance between preserving academic integrity and fostering an environment conducive to creativity and innovation poses an intricate challenge for IHEs. </P>

<P>Furthermore, the fairness of GPT detectors raises apprehensions (Smith &amp; Smith 2022:188). These systems, although intelligent, may inadvertently penalise students whose writing styles bear resemblance to existing academic or online content, even when the students’ work is genuinely original. The spectre of false positives and negatives looms large, potentially inflicting unjust academic penalties and jeopardising the academic journey of students with no ill intent (Jones &amp; Lee 2023:21). Again, this brings to the fore the need for educators to have in-depth knowledge of how these GPT detectors function and the skill to probe the systems in order not to unfairly penalise students for their hard work. </P>

<P>While genuine concerns exist regarding the use of GPT detectors, these tools also bear the potential to become catalysts for the development of critical thinking skills in students (Eaton &amp; Gysbers 2021:56). By acting as deterrents against academic dishonesty, GPT detectors sculpt an environment where students are not merely encouraged but compelled to engage in critical and independent thinking. They understand that their work will be assessed on its intrinsic value rather than its originality, thereby ushering in a new era of authentic and enriched learning experiences (Kumar 2020:140). Thus, there is a need to comprehensively explore the broader implications of GPTs on critical thinking in the educational realms. </P>

<P>In the context of the foregoing, this chapter argues that academic anxiety, technostress, and integrity related to the use of GPTs exist and theorises plausible resolutions to remedy these situations among students. To achieve this, the ensuing sections provide an overview of GPT detectors, delves deeply into the concerns raised with academic integrity and anxiety, and charts the way out of this situation. </P>
</Sect>

<Sect>
<H4>Overview of GPT Detectors </H4>

<P>In the age of digital learning and the ubiquitous availability of information, the need to maintain academic integrity has never been more crucial. The evolution of technology and educational methods has paved the way for innovative tools designed to detect and prevent academic dishonesty. Among these, GPT detectors stand out as sophisticated AI solutions specifically engineered to identify and deter plagiarism, cheating, and other forms of academic misconduct (Smith &amp; Smith 2022:176). Amongst the most well-known GPT detectors are GPTZero, ZeroGPT, GPT 2 output detector, CheckGPT, and Writefull. According to Steele (2023:3), these detectors can be used to enhance students’ critical thinking, free up their intellectual passion and reading comprehension, and teach academic writing skills. Given the preceding, there is a need to provide a comprehensive overview of GPT detectors, delving into their development, functionality, and their role in reshaping academic integrity in the modern educational landscape. </P>

<P>GPT detectors are products of the growing intersection between AI and education. They have evolved in response to the escalating challenges posed by the digital age, where vast repositories of information are easily accessible, and online learning platforms have become the norm (Jones &amp; Lee 2023:23). These detectors are derivatives of the broader GPT family of language models, which include models such as GPT 3, GPT 4, and beyond. GPT language models are pre-trained on massive datasets that encompass a wide array of texts, ranging from books and academic articles to internet content. This pre-training equips them with an extraordinary grasp of natural language understanding and generation. </P>

<P>GPT detectors, however, are specialised variations of these models, fine-tuned to tackle specific issues including academic integrity and anxiety. The fine-tuning process involves training the model on a dataset of academic content, including essays, research papers, and various academic resources (Smith &amp; Smith 2022:186). This specific training enables GPT detectors to distinguish between authentic academic work and content that may be plagiarised or in violation of academic integrity. GPT detectors aim to maintain and promote academic honesty. Bretag et al. (2019:1855) summarise the functionality of the chatbots performing the following sequential activities: Content analysis, data comparison, similarity assessment, citation, referencing analysis, alerting, and reporting. With the overview of GPT detectors provided, the critical issues of academic anxiety, technostress, and academic integrity are discussed below. </P>
</Sect>

<Sect>
<H4>Analysing the Landscape of Academic Anxiety and Technostress </H4>

<P>Academic anxiety is emotionally exhausting and involves feelings of worry, distress, and tension regarding the potential failure of an assignment or exam. A student’s personal life and academic performance could be affected as well. A correlation study by Shakir (2014:31) reports a negative correlation between academic anxiety and academic performance among secondary school students. Additionally, Das (2019:20) conducted a similar study with mathematics students and discovered that academic anxiety is significantly negatively related to students’ academic performance. Thus, the concentration and motivation of students are disrupted by high levels of anxiety. Moreover, academic anxiety, fuelled by technostress, became a widely discussed topic during and after the COVID-19 (Coronavirus disease of 2019) pandemic and the hype surrounding generative AI tools like ChatGPT 3.5 (chat generative pre-trained transformer 3.5) (Amponsah, Van Wyk, &amp; Kolugu 2022:7). Technostress itself is defined as the negative psychological (e.g., anxiety), physiological (e.g., fatigue), and behavioural (e.g., exhaustion) responses humans have to technology overload (Kumar 2024:1 of 16). If not properly managed by IHEs, academic anxiety induced by technostress can result in a cognitive avoidance of distress, uncertainty, and psychological discomfort. </P>

<P>It is imperative to add that the past four decades of research on academic anxiety has predominantly been characterised by cognitions and behavioural patterns of students which have remained consistent as dimensions of poor academic performance. Also, scholars have reported patterns of academic anxiety which are associated with fear of negative evaluations (Brown, White, Doan, &amp; De Bruin 2011:331), poor study skills (Sanghvi 1995:71), poor test performance (Kivimäki 1995:51), perfectionism (Eum &amp; Rice 2011:167), and recently, technostress (Amponsah, Van Wyk, &amp; Kolugu 2022:9; Huo &amp; Siau 2023:2). Furthermore, as there has been an increase in the development of Gen-AI (generative artificial intelligence) tools like ChatGPT 3.5 and 4.0, so has academic anxiety and technostress increased amongst students which have affected their concentration, left them with distracted attention, excessive anxiety, tension, fear of having delays and avoidance of assignments. This also affected their general academic achievement and wellbeing (Caporusso 2023:3 of 12). </P>

<P>Studies on Gen-AI, like ChatGPT 3.5 and Google Bard as LLMs (large language models) have disrupted and significantly impacted education positively. More importantly, it has the potential to transform and increase accessibility and accelerate education exponentially. In the chase for academic success, there are downsides to using chatbots. All these Gen-AI tools are available and have created a hype among students as they view these tools as easy options to answer assessments and develop projects in pursuit of instant success. Based on the exposure to LLMs such as ChatGPT 3.5, research has reported concerns about using ChatGPT to plagiarise or cheat in their assignments (un) knowingly because of mounting academic pressure and elevated expectations for success (Cotton, Cotton, &amp; Shipway 2023:236). </P>

<P>However, this prompt engineering phenomenon has some major drawbacks related to technostress, academic anxiety, and academic dishonesty (Ayyagari, Grover, &amp; Purvis 2011; Huo &amp; Siau 2023:2; Wach, Duong, Ejdys, Kazlauskaitė, Korzynski, Mazurek, Paliszkiewicz, &amp; Ziemba 2023:19). Additionally, the overuse of Gen-AI could increase academic anxiety and harm a student’s academic performance if not correctly and mildly used for academic purposes. Studies indicate that students have plagiarised their assignments by using Gen-AI tools to cheat in academic writing which is an academic violation and dishonesty (Azulay Chertok, Barnes, &amp; Gilleland 2014:1324; Caporusso 2023:3 of 12) which often leads to an increase in academic anxiety. Academic anxiety is regarded as a psychological and emotional statement of the mind which leads to feelings of stress, tension, low concentration levels, and fear of failure in an academic task. These distractors affect an individual’s self-esteem, motivation, and avoidance of submitting assignments timeously, which ultimately affects their academic performance. </P>

<P>Moreover, the literature reports technostress and techno-overload as downside effects of social media and ChatGPT. In 1984, the concept of technostress was first used by clinical psychologist, Craig Brod, as a major cause of the overuse of technology by people. According to Amponsah et al. (2022:3), excessive use or exposure to social media can lead to technostress, cognitive overload, reduced staff morale, low productivity, and ‘academic fatigue.’ Additionally, studies have indicated that social media can lead to technostress and techno-overload (when users are compelled to use technologies faster and longer) (Leung &amp; Zhang 2017:389). However, Gen-AI has recently brought significant changes and transformations to HE (higher education), but it has also led to academic dishonesty and technostress among students (Ragu-Nathan, Tarafdar, Ragu-Nathan, &amp; Qiang 2008:423; Khanthavit &amp; Khanthavit 2023:213). Gen-AI tools such as ChatGPT 3.5, LLaMA 2, and Bard are being used more by students for academic purposes, but excessive exposure could lead to increased technostress and techno-anxiety (Chatzopoulou, Filieri, &amp; Dogruyol 2020:1272-1273). </P>

<P>In a critical analysis study, Wach et al. (2023:9) explored the ‘dark side’ of the overuse of prompt engineering like ChatGPT. The authors found that drawbacks such as threats and anxiety are major causes for concern relating to the use of ChatGPT and provided recommendations to prevent academic anxiety. This is in addition to findings that technostress is experienced by people who have excessive exposure to and overuse in reliance on technology (techno-overload) or ICTs (information and communication technologies) (Singh, Bala, Dey, &amp; Filieri 2022:271). </P>

<P>Technostress results from excessive screentime and the overuse of technology either at school, work, or in private life. This has several negative consequences such as poor work performance, depression, job dissatisfaction, and reduced work commitment (Qi 2019:1338). For example, when students face academic pressure and expectations for success, the only way out for some of them is the excessive use of chatbots as a means to cheat academically. In addition, technostress is experienced by people who have excessive exposure to and overuse in reliance on technology (techno-overload) or prolonged ICT exposure in the workplace (Singh et al. 2022:271). The cause of excessive use of ChatGPT is academic anxiety which is the inability to cope with the academic pressure when chasing success. Moreover, recent studies reported the downside of prolonged exposure to and the use of chatbots (prompt engineering). In this case ChatGPT is a cause of technostress and fatigue among students and staff (Wach et al. 2023:9). This view is supported by Caporusso (2023:9 of 12) with reference to creative displacement anxiety and technostress. Moreover, Caporusso argues that IHEs need to increase the awareness of the overuse of technology for the wellbeing of staff. </P>
</Sect>

<Sect>
<H4>Concerns with Academic Integrity </H4>

<P>Cheating, plagiarism, and other dishonest acts, which are violations of academic integrity, are an increasing concern for faculty, students, and the public who hold the trust in graduates to possess the knowledge needed to complete their studies (Azulay Chertok et al. 2014:1324). As already highlighted, common examples of academic dishonesty cited include cheating in examinations and plagiarising written assignments (Turner &amp; Beemsterboer 2003:1122). Mostofa, Tabassum, and Ahmed (2021:257) document that academic plagiarism has been on the rise globally and is commonly widespread among university students. Since the introduction of LLMs such as ChatGPT and Google Bard, many scholars have raised ethical concerns regarding academic integrity. That is, with the transformative potential of the modern and pervasive technological revolution of AI chatbots, concerns about academic integrity have become more critical than ever before (Bin-Nashwan, Sadallah, &amp; Bouteraa 2023:2 of 11). </P>

<P>The increased comfort with AI technologies makes it easier for students in online learning environments to engage in violations of academic integrity such as copy-pasting from online sources and the unauthorised use of electronic resources during exams (Azulay Chertok et al. 2014:1325). This exponential growth of unethical behaviour in HE poses crucial challenges to quality education as enshrined in SDG 4 (Sustainable Development Goal 4) and the tenets of academic integrity (Gottardello &amp; Karabag 2022:527). As a result, universities are going through an unprecedented disruption and concerns relating to the breach of academic integrity in light of the social, economic, and technological changes in HE (Roe &amp; Perkins 2022:1 of 10). </P>

<P>Furthermore, plagiarism detection software such as Turnitin and SafeAssign, as well as the GPT detectors already indicated above are designed to combat outsourcing assessment and research tasks (Liu, Yao, Li, &amp; Luo 2023:6 of 23; Perkins, Roe, Postma, McGaughran, &amp; Hickerson 2023:4 of 21), although with new LLMs such as ChatGPT, it is difficult for inexperienced faculty members and researchers to detect GPT-generated texts (Liu et al. 2023:5 of 23). However, thanks to these tools, plagiarism is being discovered at a faster rate than in the past (Halupa &amp; Bolliger 2013:298). Many IHEs, therefore, employ plagiarism detection tools to promote novelty in writing and detect novel or unexpected ways of cheating or plagiarising (Canzonetta 2021:1). </P>

<P>According to Belli, Raventós, and Guarda (2020:661), many students plagiarise because they lack confidence in their writing abilities, do not dedicate sufficient time to accomplish tasks, possess a positive attitude towards deception, or simply ignore how to properly quote a text. Ayton, Hillman, Hatzikiriakidis, Tsindos, Sadasivan, Maloney, Bragge, Diug, and Illic (2022:1925) have also found that a limited understanding of plagiarism and poor academic or language skills are among the reasons why students plagiarise. Belli et al. (2020:661) add that the cost of plagiarism is very expensive because it turns educators into police officers who have to dedicate much of their time and effort to something that does not yield any benefit to the learning environment. This invariably affects the performance of educators as they spend precious time and effort to police students’ work at the expense of research and other fulfilling engagements. </P>

<P>Nonetheless, in many IHEs, both faculty and students do not understand the concept of plagiarism, especially self-plagiarism (Halupa &amp; Bolliger 2013:303). Mostofa et al. (2021:258) assert that many students in different disciplines and academic levels have a poor awareness of plagiarism. For example, in a report published on Unlocking the power of generative AI models and systems such as GPT 4 and ChatGPT for higher education, some of the essential questions that students asked are, ‘Am I allowed to use ChatGPT for a seminar or final paper, or is that cheating?’; and ‘How exactly do I use ChatGPT best?’ (Gimpel, Hall, Decker, Eymann, Lämmermann, Mädche, Röglinger, Ruiner, Schoch, Schoop, Urbach, &amp; Vandirk 2023:3). </P>

<P>According to Mostofa et al. (2021:265), not having clear ethical norms and standards about academic writing can cause an individual to fall foul of the guilt of plagiarism. While the media coverage about ChatGPT and recent LLMs has focused on ‘cheating or plagiarism’ (Gimpel et al. 2023), Perkins (2023:1 of 24) argues that the use of LLMs does not necessarily indicate plagiarism if students are transparent in how they have been used in any submission. There is also intentional and unintentional plagiarism (Ayton et al. 2022:1929), implying that it is possible for a student to unintentionally fall victim to academic misconduct or the breach of (institutional) plagiarism rules because of the nature (sensitivity) of plagiarism detection tools. </P>

<P>To conclude, Turner and Beemsterboer (2003:1123) acknowledge that academic dishonesty harms the dishonest individual because they do not acquire the basic learning, skills, or knowledge. Subsequently, the public who holds trust in the graduate’s skill is deceived and the peer of the dishonest individual who competes in class suffers harm due to an unfair advantage. The faculty of the dishonest student is harmed by not being aware of the true grasp of the content. Hence, such faculty are unable to modify instruction when necessary. Finally, the reputation of the university or institution of the dishonest student is often badly damaged when the public perceives that it does not set and uphold higher standards among both faculty and students. </P>

<P>Given the preceding, it is necessary to identify that academic integrity demands a student to adopt the practice of engaging in meaningful research and completing academic work in a fair and coherent manner (Bin-Nashwan et al. 2023:4 of 11). Bin-Nashwan et al. emphasise that fostering academic integrity requires dedication, honesty, trust, responsibility, and respect. Similarly, the International Center for Academic Integrity (2021) has conceptualised the fostering of academic integrity as a commitment to six basic principles, namely honesty, trust, fairness, respect, responsibility, and courage. Therefore, IHEs are required to develop student competencies and engage in joint action with all academic communities to preserve academic integrity (Gottardello &amp; Karabag 2022:527-529). </P>

<P>Given the above, we argue that academic anxiety has negative consequences. Still, there is a need to foster integrity in the age of GPT detectors to address misinformation and the numerous falsehoods generated by Gen-AI tools. This leads to the question, ‘Can Gen-AI plagiarism tools like GPT detectors be utilised to promote critical thinking instead of detecting dishonesty and cheating among students and academia?’ To preserve the image and reputation of IHEs, it is essential to implement targeted measures against cheating. To protect academic integrity, it is argued that awareness and ethical considerations are crucial, along with possible revisions to policies related to integrity, copyright infringements, and academic practices. The latter provides a basis for IHEs to implement anti-cheating strategies. </P>

<P>The literature offers measures to prevent academic cheating by Gen-AI tools in education. For example, Oravec (2023:220-223) has conducted studies on cheating detection strategies and Gen-AI empowered skills to aid students in addressing academic dishonesty and plagiarism. One of their major recommendations was for educators to promote and raise awareness about the educational benefits of using Gen-AI, specifically GPT detectors, to enhance students’ and educators’ proficiencies in utilising Gen-AI tools for preventing dishonesty. Another measure to prevent cheating or academic dishonesty is using contextual case studies, providing original research and problem-based projects for students to present the results, writing, and reporting as a group. Other recommended approaches include group discussions, the development of purpose-driven objectives like specific case studies, problem-solving activities, project-based learning tasks, and empowering students with academic writing skills. We strongly believe that implementing these strategies will raise awareness and improve critical conversations about Gen-AI tools, safeguarding academic integrity and fostering students’ sense of integrity. </P>

<P>Since the emergence of GPT detectors, universities have been compelled to address policy revisions, while advocating awareness initiatives among staff and students is of vital importance to prevent dishonesty. Universities need to create an awareness of the ethical considerations of using Gen-AI tools or GPT detectors before adopting chatbots in practice. As a matter of urgency, academics must be cognisant of the speed of the emergence of new Gen-AI tools such as ChatGPT 3.5 and other LLMs. Studies concur that a major issue that needs to be addressed is the ethics of using Gen-AI and detector tools by students and academics (Hagendorff 2020:109). These studies express concerns that such tools may breach ethical principles. Similar views hold that the challenges of cheating, plagiarism, and copyright infringement all have ethical implications for the quality of education. </P>

<P>Furthermore, combating academic dishonesty requires a multifaceted approach which might include tutorials and other short educational interventions aimed at increasing the knowledge of faculty or students about academic integrity (Stoesz &amp; Los 2019:3). Azulay Chertok et al. (2014:1325) enumerate several means to foster academic integrity among students including consistently meting out disciplinary action for the violation of academic integrity, educating students about academic dishonest behaviours and their consequences, providing clarity in policy and expectations about academic dishonesty, setting up measures to eliminate or reduce possibilities of cheating, developing new methods of evaluation which are in accordance with the advancement of technology, implementing an integrity agreement between both faculty and students, and also the implementation of honour codes (ethical principles governing an academic community such as an agreement not to lie or cheat). In consensus, Bultas, Schmuke, Davis, and Palmer (2017:58) also mention that the development and clear enforcement of integrity policies, the role modelling of professional behaviour, and integrity by faculty can influence student behaviour as the use of honour codes are solutions to improve the academic integrity among students. </P>

<P>McCabe and Pavela (2004:12-15) list ten core principles of academic integrity on how faculty should foster student honesty: </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Recognising and affirming academic integrity as a core institutional value; </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>fostering a lifelong commitment to learning; </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>affirming the role of the educator as a guide and mentor; </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>helping students to understand the potential of the internet and how the potential can be lost if online resources are used for fraud, theft, and deception; </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>encouraging student responsibility for academic integrity; </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>clarifying expectations for students; </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>developing fair and creative forms of assessment; </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>reducing opportunities to engage in academic dishonesty; </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>responding to academic dishonesty when it occurs; and </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>helping to define and support campus-wide academic integrity standards. </LBody>
</LI>
</L>

<P>It is also believed that a careful design of assessment tasks in the form of authentic assessments can deter students from engaging in academic misconduct and further promote ethical decision-making and behaviour regarding academic work (Sotiriadou, Logan, Daly, &amp; Guest 2020). </P>

<P>Moreover, educators also need to be involved in the formulation and implementation of academic integrity policies because their understanding and beliefs about it are crucial for effective integration (Gottardello &amp; Karabag 2022:527). Academic integrity is less likely to be violated when students perceive that there is a commitment to ethical standards from all sides (all the academic community) and information about it is adequately disseminated (Gottardello &amp; Karabag 2022). Gottardello and Karabag (2022) add that involving academics and different stakeholders can contribute to forging a culture of academic integrity and identifying ways to appropriately respond to the expectations of their institutions. Also, the publication of academic integrity policies in places widely and readily accessible to students such as web pages and student manuals can facilitate their implementation. This can occur in tandem with discussion forums on academic integrity to increase awareness of the policies (Gottardello &amp; Karabag 2022; Whitley &amp; Keith-Spiegel 2001:332). </P>

<P>Lastly, institutions and educators can also utilise technologies such as Turnitin for identifying the use of APTs (automated paraphrasing tools) or LLMs for academic dishonesty (Roe &amp; Perkins 2022:5 of 10). Additionally, both faculty and students need to be trained on the proper use of APTs and LLMs to avoid academic misconduct (Roe &amp; Perkins 2022:7 of 10). A national study of first-year computing programmes also reveals several strategies for maintaining academic integrity such as education about academic integrity; discouraging cheating through monitoring or observing student work; reducing the benefits of cheating through low-stakes assessment; making cheating difficult by invigilating assessment; and empowerment by supporting and building relationships among students (Sheard et al. 2017:246). Fostering academic integrity matters because it signifies that honesty persists in all endeavours of an academic environment (Turner &amp; Beemsterboer 2003:1122). </P>
</Sect>

<Sect>
<H4>Synthesis of the Critical Issues </H4>

<P>Academic integrity, being a foundational principle in education, faces unprecedented challenges in the contemporary digital era. The ubiquitous availability of information, the prevalence of online learning platforms, and the transformative role of GPT detectors demand further comprehensive exploration of the issues. This in-depth analysis thus navigates through the complexities of academic integrity, the surge in academic anxiety, the evolving landscape of GPT detectors, and critical strategies for fostering and preserving academic probity. </P>

<P>First, it is obvious that academic integrity has been compromised largely with the advent of the digital age. Academic integrity is regarded as the bedrock of education and it traditionally encapsulates a commitment to honesty, originality, and ethical conduct (Bretag et al. 2019:1849). However, the advent of the digital age has redefined the parameters of maintaining academic probity. With information accessible at the click of a button and educational landscapes shifting to online platforms, IHEs grapple with new challenges in upholding the sanctity of academic integrity (Eaton &amp; Gysbers 2021:47). </P>

<P>Second, accompanying the digital revolution is a palpable rise in academic anxiety among students, a phenomenon that Kumar (2020:133) asserts, has surged significantly. The relentless competition facilitated by digital platforms and the omnipresence of educational metrics amplify this anxiety. As students confront the looming spectre of failure or falling short of academic standards, stress becomes a prevalent companion, prompting some to resort to unethical practices as a coping mechanism (Singh et al. 2022:270). </P>

<P>Also, amidst these challenges, the emergence of GPT detectors, exemplified by models like GPTZero and CheckGPT, introduces both promise and disquiet. These advanced AI tools are meticulously designed to unearth and thwart plagiarism, cheating, and other forms of academic dishonesty, presenting a paradigm shift in how IHEs combat breaches of academic integrity (Hagendorff 2020:113). However, while GPT detectors offer a promising defense against academic misconduct, they simultaneously raise ethical and privacy concerns. The implementation of these technologies necessitates an intrusive surveillance of students’ academic work and online activities, leading to debates about potential infringements of privacy rights and the impact on open communication in educational settings (Bretag et al. 2019:1855). It is vital to expose students to detector software to discourage cheating, which may prevent dishonesty among students. Awareness should be inculcated, as well as the adoption and design of authentic learning experiences to highlight the usefulness and educational value of GPT detector software. As a strategy, webinars should be created to promote critical conversations among students and staff as a means to increase awareness of GPT detector software. We are, therefore, advocating for the need to carefully strike the delicate balance between preserving academic integrity and fostering an environment conducive to creativity and innovation. </P>

<P>The intricate interplay between academic anxiety, technostress, and the utilisation of Gen-AI tools also forms a critical nexus in the modern educational landscape (Khanthavit &amp; Khanthavit 2023:215). An excessive reliance on such tools can contribute to heightened academic anxiety, impacting students’ concentration, motivation, and their overall academic performance. In line with the thoughts of Khanthavit and Khanthavit (2023:213), the study underscores the need for a nuanced understanding of the intersection between technological advancements and student wellbeing. </P>

<P>Moreover, the evolution of academic integrity concerns dovetails with the introduction of LLMs such as ChatGPT. Plagiarism detection software and GPT detectors emerge as crucial instruments in identifying and curbing violations. However, challenges persist, especially in detecting content generated by GPTs. This study, thus, highlights the need for IHEs to adapt their strategies to effectively combat emerging forms of academic dishonesty facilitated by advanced AI (Perkins et al. 2023:3 of 21). </P>

<P>Lastly, the study revealed that strategies for fostering academic integrity in this digital epoch necessitate a multifaceted and strategic approach. The recommendations encompass a spectrum of interventions, including educational initiatives, disciplinary measures, policy clarity, technological solutions, and the active involvement of faculty in policy formulation (Azulay Chertok et al. 2014:1328; Roe &amp; Perkins 2022:10 of 10). </P>

<P>Based on the above, we conclude this section on the note that the landscape of academic integrity and the challenges posed by academic anxiety are undergoing a profound transformation in the age of GPT detectors and technological advancements. This synthesis of scholarly insights underscores the intricate interplay between preserving the sanctity of education, addressing student anxiety, and incorporating advanced tools to successfully navigate this complex terrain. </P>
</Sect>

<Sect>
<H4>Conclusion and Recommendations </H4>

<P>First, academic integrity, which is foundational to the ethos of education, faces both unprecedented threats and promising solutions. The integration of GPT detectors, exemplified by models like GPTZero and CheckGPT, signifies a paradigm shift in combating plagiarism and academic misconduct. While these AI tools offer a robust defense mechanism, their implementation necessitates a delicate balance between surveillance and safeguarding students’ privacy rights. IHEs are challenged to adapt policies and practices to ensure ethical deployment while fostering an environment conducive to creativity and innovation. </P>

<P>Second, the rise of academic anxiety among students poses a significant concern. The digital era, with its relentless competition and elevated expectations, contributes to heightened stress levels and regrettably, unethical practices. GPT detectors, while instrumental in curbing dishonest behaviours, should not be viewed in isolation. They must be part of a comprehensive approach that addresses the root causes of academic anxiety, promoting a culture of trust, support, and resilience. </P>

<P>Third, the multifaceted recommendations from scholarly articles provide a roadmap for IHEs that aim to navigate these challenges effectively. Beyond the technical aspects of implementing GPT detectors, there is a pressing need for educational interventions that enhance students’ understanding of plagiarism, academic integrity policies, and the responsible use of technology. Faculty, as key stakeholders, play a pivotal role not only in adopting these tools but also in fostering a supportive and understanding academic environment. </P>

<P>Moreover, the fairness and accuracy of GPT detectors in identifying instances of plagiarism are crucial for the credibility of academic assessments. Continuous refinement of detection algorithms, coupled with regular audits and feedback mechanisms, ensures that these tools serve their purpose without disproportionately penalising students. Institutions must be agile in adapting strategies to the evolving tactics employed by students, addressing not only the symptoms but also the underlying causes of academic dishonesty. </P>

<P>As education embraces technological advancements, the onus lies on faculty and IHEs to empower students to navigate this new landscape responsibly. Faculty training becomes indispensable, not just in the technical aspects of using tools like GPT detectors but also in understanding the broader implications for student wellbeing. The intersection of technology and education should be approached with foresight, emphasising the wholistic development of students and creating an environment that encourages critical thinking and genuine learning experiences. </P>

<P>In essence, the conclusion drawn from this exploration is a call for a wholistic and adaptive approach to academic integrity in the digital age. The integration of GPT detectors and technological tools should be complemented by a commitment to addressing the root causes of academic anxiety. By fostering a culture of trust, transparency, and resilience, IHEs can navigate these challenges successfully, ensuring that the pursuit of knowledge remains a transformative and enriching experience for students. </P>
</Sect>

<Sect>
<H4>References </H4>

<P>Amponsah, S., Van Wyk, MM., &amp; Kolugu, MK. 2022. Academic experiences of ‘zoom-fatigue’ as a virtual streaming phenomenon during the COVID-19 pandemic. International Journal of Web-Based Learning and Teaching Technologies
<Link> 17(6):1-16. https://doi.org/10.4018/ </Link>

<Link>IJWLTT.287555 </Link>
</P>

<P>Ayyagari, R., Grover, V., &amp; Purvis, R. 2011. Technostress: Technological antecedents and implications. Management Information Systems Quarterly
<Link> 35(4):831-858. https://doi.org/10.2307/41409963 </Link>
</P>

<P>Ayton, D., Hillman, C., Hatzikiriakidis, K., Tsindos, T., Sadasivan, S., Maloney, S., Bragge, P., Diug, B., &amp; Illic, D. 2022. Why do students plagiarise? Informing higher education teaching and learning policy and practice. Studies in Higher Education 47(9):1921-1934. 
<Link>https://doi.org/10.1080/03075079.2021.1985103 </Link>
</P>

<P>Azulay Chertok, IR., Barnes, ER., &amp; Gilleland, D. 2014. Academic integrity in the online learning environment for health sciences students. Nurse Education Today
<Link> 34(10):1324-1329. https://doi. </Link>

<Link>org/10.1016/j.nedt.2013.06.002 </Link>
</P>

<P>Belli, S., Raventós, CL., &amp; Guarda, T. 2020. Plagiarism detection in the classroom: Honesty and trust through the Urkund and Turnitin software. In Rocha, Á., Ferrás, C., Montenegro Marin, CE., &amp; Medina García, VH. (Eds.): Information technology and systems, 
<Link>660-668. Cham: Springer. https://doi.org/10.1007/978-3-030-40690-5_63 </Link>
</P>

<P>Bin-Nashwan, SA., Sadallah, M., &amp; Bouteraa, M. 2023. Use of ChatGPT in academia: Academic integrity hangs in the balance. Technology in Society
<Link> 75. 102370. 11 pages. https://doi.org/10.1016/j. </Link>

<Link>techsoc.2023.102370 </Link>
</P>

<P>Bretag, T., Harper, R., Burton, M., Ellis, C., Newton, P., Rozenberg, P., Saddiqui, S., &amp; Van Haeringen, K. 2019. Contract cheating: A survey of Australian university students. Studies in Higher Education
<Link> 44(11):1837-1856. https://doi.org/10.1080/03075079.2018.1462788 </Link>
</P>

<P>Brown, LA., White, P., Doan, JB., &amp; De Bruin, N. 2011. Selective attentional processing to fall-relevant stimuli among older adults who fear falling. Experimental Aging Research
<Link> 37(3):330-345. https://doi.org/10.1080/0361073X.2011.568833 </Link>
</P>

<P>Bultas, MW., Schmuke, AD., Davis, RL., &amp; Palmer, JL. 2017. Crossing the ‘line:’ College students and academic integrity in nursing. Nurse Education Today
<Link> 56:57-62. https://doi.org/10.1016/j. </Link>

<Link>nedt.2017.06.012 </Link>
</P>

<P>Canzonetta, J. 2021. Repurposing plagiarism detection services for responsible pedagogical application and (in)formative assessment of source attribution practices. Assessing Writing 50, 
<Link>100563. https://doi.org/10.1016/j.asw.2021.100563 </Link>
</P>

<P>Caporusso, N. 2023. Generative artificial intelligence and the emergence of creative displacement anxiety: Review. Research Directs in Psychology and Behavior
<Link> 3(1). 12 pages. https://doi.org/10.53520/ </Link>

<Link>rdpb2023.10795 </Link>
</P>

<P>Chatzopoulou, E., Filieri, R., &amp; Dogruyol, SA. 2020. Instagram and bodyimage: Motivation to conform to the ‘Instabod’ and consequences on young male wellbeing. Journal of Consumer Affairs
<Link> 54:1270-1297. https://doi.org/10.1111/joca.12329 </Link>
</P>

<P>Cotton, DRE., Cotton, PA., &amp; Shipway, JR. 2023. Chatting and cheating: Ensuring academic integrity in the era of ChatGPT. Innovations in Education and Teaching International
<Link> 61(2):228-239. https://doi. </Link>

<Link>org/10.1080/14703297.2023.2190148 </Link>
</P>

<P>Das, K., 2019. Role of ICT for better mathematics teaching. Shanlax International Journal of Education
<Link> 7(4):19-28. https://doi. </Link>

<Link>org/10.34293/education.v7i4.641 </Link>
</P>

<P>Eaton, SE. &amp; Gysbers, V. 2021. Academic integrity in online learning: The role of academic librarians. Canadian Journal of Higher Education 51(2):46-64. </P>

<P>Eum, K. &amp; Rice, KG. 2011. Test anxiety, perfectionism, goal orientation, and academic performance. Anxiety, Stress, &amp; Coping 24(2):167-178. 
<Link>https://doi.org/10.1080/10615806.2010.488723 </Link>
</P>

<P>Gimpel, H., Hall, K., Decker, S., Eymann, T., Lämmermann, L., Mädche, A., Röglinger, M., Ruiner, C., Schoch, M., Schoop, M., Urbach, N., &amp; Vandirk, S. 2023. Unlocking the power of generative AI models and systems such as GPT-4 and ChatGPT for higher education. University of Hohenheim
<Link>. Available at: http://opus.uni-hohenheim.de/volltexte/2023/2146/</Link>
. (Accessed on 25 October 2023). </P>

<P>Gottardello, D. &amp; Karabag, SF. 2022. Ideal and actual roles of university professors in academic integrity management: A comparative study. Studies in Higher Education
<Link> 47(3):526-544. https://doi.org/ </Link>

<Link>10.1080/03075079.2020.1767051 </Link>
</P>

<P>Hagendorff, T., 2020. The ethics of AI ethics: An evaluation of guidelines. Minds and Machines
<Link> 30(1):99-120. https://doi.org/10.1007/ </Link>

<Link>s11023-020-09517-8 </Link>
</P>

<P>Halupa, C. &amp; Bolliger, DU. 2013. Faculty perceptions of student self plagiarism: An exploratory multi-university study. Journal of Academic Ethics
<Link> 11(4):297-310. https://doi.org/10.1007/s10805-013-9195-6 </Link>
</P>

<P>Huo, X. &amp; Siau, KL. 2023. Generative AI and ChatGPT impact on technostress of teachers. PACIS 2023 Proceedings. Available at: 
<Link>https://scholars.cityu.edu.hk/en/publications/generative-ai-and-chatgpt-impact-on-technostress-of-teachers(8279ae6d-c454-4a81-b645-b0edfffe1577).html</Link>
. (Accessed on 17 November 2023). </P>

<P>International Center for Academic Integrity. 2021. The fundamental values of academic integrity. 3rd ed. International Center for Academic Integrity
<Link>. Available at: https://academicintegrity.org/resources/ </Link>

<Link>fundamental-values</Link>
. (Accessed on 17 November 2023). </P>

<P>Jones, LM. &amp; Lee, MJ. 2023. Ethical considerations in automated plagiarism detection: A critical analysis. Journal of Academic Ethics 2023:1-34. </P>

<P>Khanthavit, A. &amp; Khanthavit, S. 2023. ChatGPT and stress. ABAC Journal 
<Link>43(3):213-224. https://doi.org/10.59865/abacj.2023.39 </Link>
</P>

<P>Kivimäki, M. 1995. Test anxiety, below-capacity performance, and poor test performance: Intrasubject approach with violin students. Personality and individual differences
<Link> 18(1):47-55. https://doi. </Link>

<Link>org/10.1016/0191-8869(94)00115-9 </Link>
</P>

<P>Kumar, R. 2020. Academic stress and its effect on the academic performance of students. International Journal of Social Sciences and Humanities 4(2):132-141. </P>

<P>Kumar, SP. 2024. TECHNOSTRESS: A comprehensive literature review on dimensions, impacts, and management strategies. Computers in Human Behavior Reports
<Link> 16. 100475. 16 pages. https://doi. </Link>

<Link>org/10.1016/j.chbr.2024.100475 </Link>
</P>

<P>Leung, L. &amp; Zhang, RW. 2017. Mapping ICT use at home and telecommuting practices: A perspective from work/family border theory. Telematics and Informatics
<Link> 34(1):385-396. https://doi. </Link>

<Link>org/10.1016/j.tele.2016.06.001 </Link>
</P>

<P>Liang, W., Yuksekgonul, M., Mao, Y., Wu, E., &amp; Zou, J. 2023. GPT detectors are biased against non-native English writers. arXiv
<Link> 2304.02819v3. 9 pages. Available at: https://arxiv.org/ </Link>

<Link>pdf/2304.02819.pdf</Link>
. (Accessed on 17 November 2023). </P>

<P>Liu, Z., Yao, Z., Li, F., &amp; Luo, B. 2023. Check me if you can: Detecting ChatGPT-generated academic writing using CheckGPT. arXiv 2306.05524v1. 23 pages. </P>

<P>McCabe, DL. &amp; Pavela, G. 2004. Ten (updated) principles of academic integrity: How faculty can foster student honesty. Change: The Magazine of Higher Learning
<Link> 36(3):10-15. https://doi. </Link>

<Link>org/10.1080/00091380409605574 </Link>
</P>

<P>Mostofa, M., Tabassum, M., &amp; Ahmed, SMZ. 2021. Researchers’ awareness about plagiarism and impact of plagiarism detection tools – does awareness effect the actions towards preventing plagiarism? Digital Library Perspectives
<Link> 37(3):257-274. https://doi.org/10.1108/ </Link>

<Link>DLP-10-2020-0100 </Link>
</P>

<P>Perkins, M. 2023. Academic integrity considerations of AI large language models in the post-pandemic era: ChatGPT and beyond. Journal of University Teaching &amp; Learning Practice
<Link> 20(2). 24 pages. https:// </Link>

<Link>doi.org/10.53761/1.20.02.07 </Link>
</P>

<P>Perkins, M., Roe, J., Postma, D., McGaughran, J., &amp; Hickerson, D. 2023. Game of tones: Faculty detection of GPT-4 generated content in university assessments. arXiv 2305.18081. 21 pages. </P>

<P>Oravec, JA., 2023. Artificial intelligence implications for academic cheating: Expanding the dimensions of responsible human-AI collaboration with ChatGPT. Journal of Interactive Learning Research
<Link> 34(2):213-237. https://doi. </Link>

<Link>org/10.70725/304731gmmvhw </Link>
</P>

<P>Qi, C. 2019. A double-edged sword? Exploring the impact of students’ academic usage of mobile devices on technostress and academic performance. Behaviour &amp; Information Technology 38(12):1337-1354. 
<Link>https://doi.org/10.1080/0144929X.2019.1585476 </Link>
</P>

<P>Ragu-Nathan, TS., Tarafdar, M., Ragu-Nathan, BS., &amp; Qiang, T. 2008. The consequences of technostress for end users in organisations: Conceptual development and empirical validation. Information Systems Research
<Link> 19(4):417-433. https://doi.org/10.1287/ </Link>

<Link>isre.1070.0165 </Link>
</P>

<P>Roe, J. &amp; Perkins, M. 2022. What are automated paraphrasing tools and how do we address them? A review of a growing threat to academic integrity. International Journal for Educational Integrity 
<Link>18(1). 10 pages. https://doi.org/10.1007/s40979-022-00109-w </Link>
</P>

<P>Sanghvi, C. 1995. Efficacy of study skills training in managing study habits and test anxiety of high test anxious students. Journal of the Indian Academy of Applied Psychology 21(1):71-75. </P>

<P>Shakir, M. 2014. Academic anxiety as a correlate of academic achievement. Journal of Education and Practice 5(10):29-36. </P>

<P>Sheard, J., Simon,¹ Butler, M., Falkner, K., Morgan, M., &amp; Weerasinghe, A. 2017. Strategies for maintaining academic integrity in first-year computing courses. Proceedings of the 2017 ACM Conference on Innovation and Technology in Computer Science Education, 244-249. 
<Link>https://doi.org/10.1145/3059009.3059064 </Link>
</P>

<P>No initials are provided for this author. </P>

<P>Singh, P., Bala, H., Dey, BL., &amp; Filieri, R., 2022. Enforced remote working: The impact of digital platform-induced stress and remote working experience on technology exhaustion and subjective wellbeing. Journal of Business Research
<Link> 151:269-286. https://doi. </Link>

<Link>org/10.1016/j.jbusres.2022.07.002 </Link>
</P>

<P>Smith, AN. &amp; Smith, BN. 2022. The role of artificial intelligence in maintaining academic integrity. Computers &amp; Education 176. 104298. </P>

<P>Sotiriadou, P., Logan, D., Daly, A., &amp; Guest, R. 2020. The role of authentic assessment to preserve academic integrity and promote skill development and employability. Studies in Higher Education 
<Link>45(11):2132-2148. https://doi.org/10.1080/03075079.2019.1582015 </Link>
</P>

<P>Steele, A. 2023. Free speech platforms and the impact of the US insurrection: Misinformation in memes. Master’s thesis, 
<Link>University of Bergen, Bergen. Available at: https://hdl.handle. </Link>

<Link>net/11250/3072107</Link>
. (Accessed on 25 November 2023). </P>

<P>Stoesz, BM. &amp; Los, R. 2019. Evaluation of a tutorial designed to promote academic integrity. Canadian Perspectives on Academic Integrity 
<Link>2(1):3-26. https://doi.org/10.55016/ojs/cpai.v2i1.61826 </Link>
</P>

<P>Turner, SP. &amp; Beemsterboer, PL. 2003. Enhancing academic integrity: Formulating effective honor codes. Journal of Dental Education
<Link> 67(10):1122-1129. https://doi.org/10.1002/j.0022-0337.2003.67.10.tb03705.x </Link>
</P>

<P>Wach, K., Duong, CD., Ejdys, J., Kazlauskaitė, R., Korzynski, P., Mazurek, G., Paliszkiewicz, J., &amp; Ziemba, E. 2023. The dark side of generative artificial intelligence: A critical analysis of controversies and risks of ChatGPT. Entrepreneurial Business and Economics Review
<Link> 11(2):7-30. https://doi.org/10.15678/ </Link>

<Link>EBER.2023.110201 </Link>
</P>

<P>Weil, MM. &amp; Rosen, LD. 1997. Technostress: Coping with technology @work @home @play. Hoboken: John Wiley. </P>

<P>Whitley, BE. &amp; Keith-Spiegel, P. 2001. Academic integrity as an institutional issue. Ethics &amp; Behavior
<Link> 11(3):325-342. https://doi. </Link>

<Link>org/10.1207/S15327019EB1103_9 </Link>
</P>
</Sect>
</Sect>
</Sect>

<Sect>
<H2 id="LinkTarget_10755">Chapter 2 </H2>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_16.jpg"/>
</Figure>
</Link>

<Sect>
<H3>AI and Academic Integrity Decussation: Implications for Higher Education </H3>

<P>Nicky Tjano  </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_17.jpg"/>
</Figure>
</Link>

<Link>
<P>Director of Teaching &amp; Learning Strategy, Projects, and Portfolio Performance University of South Africa Pretoria, South Africa </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_18.jpg"/>
</Figure>
</Link>

<Sect>
<H4>Introduction </H4>

<P>AI (artificial intelligence) refers to systems that display intelligent behaviour by analysing their environment and taking actions – with some degree of autonomy – to achieve specific goals. AI-based systems can be purely software-based, acting in the virtual world (e.g., voice assistants, image analysis software, search engines, speech, and face recognition systems) or AI can be embedded in hardware devices (e.g., advanced robots, autonomous cars, drones, or IoT [internet of things] applications). This definition was proposed by HLEG (the High-Level Expert Group on Artificial Intelligence) in 2019. Samoili, Cobo, Gómez, De Prato, Martínez-Plumed, &amp; Delipetrev (2020:4) suggest that there are two broad categories that can be identified from this definition, namely the ability of machines to reason and make decisions, and the ability to learn. </P>

<P>There is a transformative force in the era of rapid </P>

<P>advancements in technology that is reforming various sectors, communities, political and educational landscapes, and so on. Nonetheless, this force is transforming the very foundation of human knowledge, and arguably human intelligence. Albert Einstein once stated that ‘the true sign of intelligence is not </P>

<P>knowledge but imagination’ (Ganiev &amp; Tashev 2021:3570). </P>

<P>Einstein went further to argue that imagining is a far more </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_19.jpg"/>
</Figure>
</Link>

<P>important skill than knowledge itself. This view implies </P>

<P>that imagination is more important than knowledge thus emphasising the need for imagination alongside knowledge in </P>

<P>fostering creativity. One would posit that AI’s transformative </P>

<P>agenda lies in changing the status quo on knowledge creation. </P>

<P>Jean-Jacques Rousseau’s views on contemporary education and </P>

<P>the role of digital technologies as captured by Jergus (2018:395-396) and Carvalho and Yeoman (2021:28) respectively emphasise </P>

<P>the significance of student’s natural development in the learning </P>

<P>journey and situate learning activity as an emergent process. With this evolution, technological advancement and digital revolution, spearheaded by AI, undoubtedly the longstanding and uncontested dominant status of IHEs (institutions of higher education), in particular universities, in the creation and dissemination of knowledge has now been challenged. </P>

<P>Therefore, drawing from the views of both Einstein and </P>

<P>Rousseau, it is undeniable that AI has reimagined the principle of the knowledge creation process, learning activity, as well as </P>

<P>pedagogical and andragogical strategies. This assertion aligns </P>

<P>with a point made by Eaton (2023:4 of 10) that with AI we have witnessed the era of post-plagiarism, where human creativity is enhanced rather than threatened. </P>

<P>The role of AI has been a subject of discussion both in the mainstream media and academic discourse. The developments in AI are fast changing the educational landscape as these advancements are intimately associated with improvements in computer capabilities, thus resulting in super-intelligent machines. This intelligence is actually the cornerstone of AI as its definition anchors ML (machine learning), whereby computers impersonate humans. Likewise, Trott, Jones, Chang, Michaelov, and Bergen (2023:2 of 21) expound on the human impressionist abilities of LLMs (large language models) by asking if LLMs know what humans know. In that piece of work, the authors cite activities such as the ability to learn, adapt, synthesise, and self-correct to complete complex tasks. This happens through LPMs (language processing models). Without a doubt, advancements bring about a lot of opportunities, but there are also challenges and associated risks (Neha 2020:308; Popenici &amp; Kerr 2017:1 of 13). </P>

<P>AI’s influence in education builds on and extends what the 4IR (fourth industrial revolution) has done as Oliver (2022b:27) affirms that 4IR alongside IoT, big data, the IIoT (industrial internet of things), and cloud computing cyber-physical systems, to mention a few, has taken the world by storm. Oliver (2022a:53), reflecting on Society 5.0 and Education 5.0, recommends that it is the responsibility of IHEs to lead in innovation incubators and be the cornerstone of ‘sustainable and positive transformation and change.’ This implies, argues Oliver (2022b:27), that IHEs should be instrumental in this era of development given the influence they have in shaping the lives of the youths, their beliefs, skills, future livelihoods, and expectations. </P>

<P>With the advent of ChatGPT (chat generative pre-trained transformer), there have been many other competing and complementary AI tools and systems focusing on computer-generated text, such as Claude, Google Bard, and DALL-E, to mention a few. These tools fall within the field of Gen-AI (generative artificial intelligence) and can also be used as AWAs (automated writing assistants), AWTs (automated writing tools), and APTs (automated paraphrasing tools) (Roe &amp; Perkins 2022). Other groups of effective AI tools useful for the field of education fall within the realm of IESs (intelligent educational systems) including ITSs (intelligent tutoring systems), educational robotics, and automated grading tools for multimedia systems. </P>

<P>The emergence and launch of ChatGPT in late 2022 by OpenAI has catalysed a rapid rise in open AI technologies and has effectively created an almost instant technological panic about the impact of AI on the HE (higher education) space. There have been worries around technological advancements, while some of the fears centred around diminishing our ability to think (Eaton 2023:2 of 10). Drawing on the developments, Eaton (2023:2 of 10) adds that the results of technological advancements and by extension AI, are evident in how AI has quickly redefined teaching, learning, and assessment practices. </P>

<P>From a research perspective, advancements in AI present an opportunity for a transformative wave that is reshaping the landscape of academic inquiry across diverse disciplines. Ongoing debates surround the ethical use of AI, with protagonists citing the associated benefits such as enhanced adaptive teaching and assessment, personalised learning experiences, and improved virtual tutoring and support. Among these divergent debates, calls for transformative changes, the need to safeguard the integrity of assessments, and to strike a balance from an ethical viewpoint forms the basis of this chapter. </P>

<P>This chapter adopts a risk-oriented mindset and ethical lens to comprehend how advancements in AI intersect with academic integrity. By dissecting the AI decussation with academic integrity within a HE landscape, the chapter embarks on a journey through the evolving landscape of AI and its profound implications for academic integrity. First, the popularity and development cycles of AI are explored, digging into the hype surrounding Gen-AI and also projecting its future applications. Second, the transformative role of AI, particularly in the health sector is explored. The third section navigates the ongoing debate between AI and human capabilities. </P>

<P>The fourth section closely examines the decussation of AI and academic integrity from the global perspectives of academic dishonesty. This is followed by an exploration of challenges commanded by the rise of a repertoire of LLMs, with special attention to AI authoring tools. In this section, the costs and benefits of AI’s authoring tools are discussed. The positive impacts of the tools such as supporting teaching and learning to negative costs like students’ dependency on AI and plagiarism dangers as well as communication threats arising from reduced human interaction are detailed in the discussion. </P>

<P>The chapter then discusses the proposed strategies to strike a balance between AI benefits and how to maintain academic integrity. As we peer into the future, the second last section reflects on future evolving trends in the AI space in education with commitment challenges. This reflection offers valuable insights for educators, policymakers, and researchers who are constantly navigating the delicate cross paths between technological advancement, specifically AI and academic integrity. The last section provides concluding remarks on the chapter. </P>
</Sect>

<Sect>
<H4>AI Popularity and Seasons of Development </H4>

<P>AI paradigm shifts can be examined from three distinct but </P>

<P>intertwined phases of development as described by Delipetrev, </P>

<P>Tsinaraki, and Kostic (2020) in their 2020 AI-Watch report for the EU (European Union) Commission’s JRC (Joint Research Centre) examining the evolution of AI. Delipetrev et al. (2020:3) regard the development of AI and its popularity as a seasonal change phenomenon emulating economic cycles as illustrated </P>

<P>in Figure 2.1. These phases are referred to as AI’s seasons of </P>

<P>development and are characterised by sequential stages of upswings (springs for growth) and downswings (winters for </P>

<P>decline) that happen over time. The latter stages are largely due </P>

<P>to cuts in funding opportunities or budgets for development. </P>

<P>The first phase is called AI foundations and occurred between 1950 and 1970. The instrumental pioneers during this primitive era include Alan Turing and John McCarthy. Turing published </P>

<P>what would arguably and undoubtedly be his breakthrough </P>

<P>seminal paper in the field of AI, that debated machines’ ability to think (Turing 1950). The debate continued and was propelled by deliberations at Dartmouth’s conference that took place six years later after Turing’s paper on computing machinery and intelligence. At the conference, McCarthy presented his definition of AI, which he coined in 1955 as ‘the science and engineering of making intelligent machines’ (Delipetrev et al. 2020:7). </P>

<P>Since that conference, excitement about machines, specifically computers’ ability to assist with human challenges, mathematical and science equations, and continued excellence in language translation abilities surged. Furthermore, a myriad of AI initiatives and developments were witnessed – from computer programming, the discovery of perceptron, the realisation of the ELIZA NLP (natural language processing) model to Shakey the Robot (Delipetrev et al. 2020:7). Likewise, Roberts (2016:14) affirms these developments by reflecting that ‘despite the Dartmouth conference’s failure to produce an intelligent machine,’ the aftermath of the conference was nothing but a success. Narrow programmes like the Logic Theorists1 were born, overwhelmingly buying into McCarthy’s optimism and dream, as the government was optimistic about smart machines’ potential to ‘analyze masses of geological data for oil and coal exploration’ (Roberts 2016:14). </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_20.jpg"/>

<Caption>
<P>Figure 2.1: AI’s seasons of development. (Source: Delipetrev et al. 2020:3) </P>
</Caption>
</Figure>

<P>The symbolic AI phase ensued between 1970 and 1990. In this era, the underlying discovery was the advent of expert and knowledge systems. The nervous part of these systems was the integration of human beings’ expert knowledge through computer programming. The expert systems were able to perform blood disease diagnostics and prescriptions (Shortliffe &amp; Davis 1975:11), the ID3 (iterative dichotomiser 3) which was able to generate decision trees (Quinlan 1986:82), and the ability for humans to have dialogue with computers (Winograd 1971:75). </P>

<P>The era between 1990 and 2020 indicates that the development of AI took an upward trajectory in an exponential form. The advent of ML and DL (deep learning) has characterised this era, and without a doubt, more is yet to come. The use of convolutional neural networks that read handwritten digits </P>

<P>Logic Theorists is a computer programme written in 1956 by Allen Newell and two colleagues, Simon and Shaw, engineered to perform automated reasoning, and has been described as the first artificial intelligence programme. </P>

<P>(LeCun, Boser, Denker, Henderson, Howard, Hubbard, &amp; Jackel 1989:400), the rise of GANs (generative adversarial networks), and transformers in language modelling for the translation and answering of questions happened in this era. The other instance of AI capabilities surpassing humans includes the 1997 era when IBM (International Business Machines) supercomputer Deep Blue defeated Gary Kasparov in a chess game. No one would have predicted that to happen to the world-renowned world chess master. There is a high degree of uncertainty associated with the future directions of AI and its advancement; however, its maximum potential can only be realised if there is trust in how it is deployed, developed, and used (Thiebes, Lins, &amp; Sunyaev 2021:447). </P>

<P>Notwithstanding all these developments in AI, and it being an important aspect of modern-day life, there are probabilities on the cards where winter seasons in AI development may follow. The same goes for seasons of greater AI spring. This, as Delipetrev et al. (2020:3) accentuate, signifies an uncertainty of the future of AI. Similarly, Schuchmann in his 2019 paper on prospects of an approaching AI winter earlier predicted that another AI winter may follow, given critiques around AI, especially on DL approaches, and variations in funding models through R&amp;D (research and development). In fact, on funding R&amp;D, the key role players as far as allocating substantial amounts of funds to AI are concerned, are the US (United States [of America]), China, European Commission through EU member states, India, South Korea, Canada, Japan, Israel, Russia, and Singapore (Samoili et al. 2020). Between 2009 and 2018, there were 34,000 role players globally with the US and China amassing this large concentration. </P>
</Sect>

<Sect>
<H4>AI’s Hype Cycles: Focus on Generative AI and Future Expectations </H4>

<P>In the history of human evolution, AI is arguably the most intricate and beyond-belief invention where human creativity has been tested and will arguably continue to be tested. Epstein and Hertzmann (2023:1110) affirm this view by stating that new classes of AI tools, especially Gen-AI, have the capability to likely and fundamentally turn around creativity and the idea generation – from conception to deployment. </P>

<P>AI’s presumed ‘godfather,’ Geoffrey Hinton, a renowned computer scientist who is credited with having built influential and crucial AI tools has shared his crusade on the evolution of AI and the possibility of outsmarting the intelligence of humankind. This was reflected by Joshua Rothman in his article, Why the godfather of AI fears what he’s built, after the intimate interview with Hinton (Rothman 2023). In his reflections, Rothman conveys that what worried Hinton is how humans would prevent AI machines from taking control. His reflection is that no one knows the answer. This evolution and subsequent successes have amplified the debate on machines’ capability to outclass human beings. </P>

<P>Nonetheless, of the debates and successes of machines’ capabilities, the AI field has experienced periods of hype cycles. The AI hype cycle 2023 sheds light on the path and evolution of innovations within AI space, and how they have influenced various sectors (Gartner 2023). Figure 2.2 depicts five phases of the hype cycle characterised by technology triggers, also known as innovation triggers, followed by a peak of an inflated expectation phase. </P>

<P>The third phase relates to the trough of disillusionment, where the ‘hype’ or interest in the innovation starts to wane due to experimental and implementation failures. It is at this stage where the technology fails to meet the expectations. The fourth phase is characterised by a slope of enlightenment where potential benefits of technology start to make sense. The last stage is referred to as a plateau of productivity, where the mainstream adoption of technology takes off and all previous efforts including R&amp;D and marketing start to pay off as Chaffey and Ellis-Chadwick (2019:74) attribute this success to serving the niche market. </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_21.jpg"/>

<Caption>
<P>Figure 2.2: AI hype cycles. (Source: Gartner 2023) </P>
</Caption>
</Figure>

<P>Aligned with hype cycles, AI tools are expected to reach a plateau of productivity in four periodic phases. The Grand View Research (2023) report projects the global market size of AI to plateau at almost $2 trillion, a surge of 38.1%. In phase one, tools will reach a plateau in a period of less than two years. This period is followed by the next plateau phase, which is two to five years. In the third phase, a plateau is reached between five and 10 years, while the fourth phase constitutes a period of a decade or more. AI engineering, prompt engineering, AI simulation, and data-centric AI are selected AI fields whose productivity plateaus would mature within two years. Before the impact of AI on academic integrity is explored, Figure 2.3 illustrates how AI is applied and has benefited 11 major industries and sectors, and as Takyar (2022) asserts, AI continues to amass huge potential for the future expansion of these industries. </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_22.jpg"/>

<Caption>
<P>Figure 2.3: Application of AI in various industries. (Source: Takyar 2022) </P>
</Caption>
</Figure>

<P>Some of these breakthroughs have influenced and propelled the discovery of many AI tools in various other sectors, for example, various fields, like food technology, real estate, the health sector, hospitality, travel, and manufacturing (Takyar 2022). The next section selectively explores how AI has transformed the health sector. The choice of this sector is that the results are highly visible, as the majority of people interact with the sector as patients to consult with their healthcare professionals. This discussion is then followed by a reflection on the big debate between AI and human capability. </P>
</Sect>

<Sect>
<H4>Application of AI in the Health Sector </H4>

<P>AI has cemented its place as a significant driver for economic development and has been dubbed an ‘area of strategic importance’ given its broader impact on society (Delipetrev et al. 2020:1). The growing influence of AI-driven capabilities in various sectors can be forgiven for making someone believe that they are in a world where they are watching science fiction. In this ‘sphere,’ one is experiencing AI algorithms’ superpower and astonishing capabilities. These amazing capabilities include the ability of AI algorithms to compose a masterpiece, which by far outclasses the best classical symphonies of all time. If one thinks of the age of the universe (Planck 2018) and the power of the universe, where Coutinho (2022) refers to the latter as ‘a wonder ahead of medicine,’ which has caught the attention of scientists and researchers and how AI has transcended into this space, the results are astonishing. To date, scientists are still trying to determine the age of the universe as Russ and his colleagues in 1997 concluded that the impact of the inhomogeneities on the global expansion factor was found to be minute (Russ, Soffel, Kasai, &amp; Börner 1997:2046). They went further to reveal that to use the Hubble constant in the usual way to determine the age of the universe produced an error that was negligible. </P>

<P>According to Manne and Kantheti (2021:78-80), some of the benefits of AI for the health industry include aiding in understanding the needs of patients, cost reduction, and most importantly, improved efficiency. At the heartbeat of this advancement is the availability and processing of healthcare data, which are often voluminous. PWC (2017) cautions that although it is undeniable that AI is gradually becoming more and more sophisticated in performing human activities, the sustainability and accessibility of quality healthcare need to be rethought, given the increasing costs associated with the new discoveries. </P>

<P>In diagnosis and treatment design, the application of and advancement in AI research have been growing and improving. In their 2008 paper, Eren, Subasi, and Coskun (2008) showcase the power of AI in telemedicine through the efficient use of mobile devices and reveal how medical decision support systems can be effective in the provision of quality healthcare services, especially in developing countries that are usually under-resourced and limited by advancement in technologies. Some of the interesting applications of AI in the sector include the deployment of mobile device apps to effectively diagnose pregnant women (Carter, Sandall, Shennan, &amp; Tribe 2019:2) and monitoring nutritional triage for oncology patients (De Bruin, Schuh, Seeling, Luger, Gall, Hütterer, Kornek, Ludvik, Hoppichler, &amp; Schindler 2018:34-35). </P>

<P>The increasing use of AI and robotics in redefining the health sector through examination, diagnosis, and treatment prescription has been witnessed. Exploration of AI chatbots to assist in understanding depression and anxiety in patients (Manne &amp; Kantheti 2021:80) and detecting psychiatric diseases (Erguzel &amp; Ozekes 2014:52) are a few of the AI technologies being tested. Eren et al. (2008:32) suggest that AI techniques like computerised decision support system e.g., ANNs (artificial neural networks), decision trees, and support vector machines are also being used through mobile applications to diagnose and treat problematic diseases like cancer. </P>

<P>In the last decade, the focus was more on a differentiation strategy through explosive innovation in medical products such as newly developed pieces of equipment and consumables. Historical and evidence-based healthcare was influenced by an enormous amount of health data at the disposal of scientists, health practitioners, and regulatory bodies (Frost &amp; Sullivan 2016). These data are in the form of the overwhelming volume of health records, research findings, and diagnosis data that are generated daily. For example, there were approximately 11,000 articles published annually in the field of skincare. One could imagine how these data have assisted dermatologists to more accurately diagnose skin care problems for millions of patients across the globe. In terms of data, the volume of healthcare data was reported to have reached 4 ZBs (zettabytes), considering that one ZB equates to a trillion gigabytes. It was projected that these data would exponentially grow by 10 times (PWC 2017:3). </P>
</Sect>

<Sect>
<H4>AI versus Humans: The Big Debate! </H4>

<P>The field of AI is dynamic and evolving at a fast pace. With its dynamism come many transformative opportunities. The same can be stated for threats. The latter attests to the need to closely monitor AI through regulatory frameworks, policies, and guidelines. The ample potential impact of AI on humanity is yet to be fully explored. Ironically, the ‘intelligence’ of AI has been debated, and the debate on whether it has the capacity to supersede human intelligence in the near future is still ongoing. In almost affirming this claim, Arrieta, Díaz-Rodríguez, Del Ser, Bennetot, Tabik, Barbado, García, Gil-López, Molina, Benjamins, and Chatila (2020:82) explain that with the manner and speed with which AI-powered systems are sophisticated, the advancement has led to a situation in which these systems can act or operate independently of human intervention, especially in their design and deployment. </P>

<P>For example, in the health sector, Manne and Kantheti (2021:78) are pondering whether the availability and use of AI tools in the future will replace physicians. The authors quickly respond by stating that practically, it is impossible. At most, however, these tools will enable physicians to achieve improved and accurate results. The vantage point lies in its ability to process enormous amounts of information a billion times faster, thus enabling it to decide much quicker than human beings can. Lu (2023), in an article headlined How smart is ChatGPT? reveals riveting results on how it is able to compete with humans in examinations. These results are worth reflecting on. In the latest release, ChatGPT 4 is reportedly capable of matching the human performance level in most professional and academic assessments. Despite these breakthroughs, it is still struggling in subjects like English language and literature (Lundgren 2024; Nasrullah &amp; Wahyu 2024). ChatGPT 4’s performance in language tasks is still exhibiting inconsistencies, thus suggesting its insufficient grasp of language and literature intricacies. The subjects in which it is demonstrating high grading and comprehension accuracy and it is performing well are in the STEM (science, technology, engineering, and mathematics) field with subjects like physics, statistics, mathematics, and biology (Lundgren 2024; Ragolane, Patel, &amp; Salikram 2024). This is particularly true when questions are objective i.e., multiple-choice or short-answer. </P>

<P>The often mystic and evolving incredible capabilities of AI and its algorithms fuel the ‘big debate’ on intelligence about AI versus human intelligence, as discussed in this section. The question of whether AI can surpass human intelligence is explored with questions like the following: </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Is it possible to use technology (AI, modelling, or data science) for the good of humanity? </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Can a human mind keep up with AI? </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Can AI be humane and ethical? </LBody>
</LI>
</L>

<P>The questions lean on the protagonists who are on the side of those arguing that AI will replace humans in their jobs. The debate is not new as advancements in technology and the prevalence of automated machines, robots, and production processes, especially in the automotive industries and huge supply chains have replaced human beings. In an attempt to respond to the first and last questions, I argue that the potential of technology, especially AI and data science, to benefit humanity is substantial, yet it is not without challenges. Various studies (e.g., Beduschi 2024; Thiebes et al. 2021) highlight how these technologies can contribute positively to societal goals, but they also caution against the risks of inequality and ethical concerns. Hence, there is a quest in some parts of the literature geared towards promoting responsible and sustainable AI in protection of human rights against bias and discrimination in algorithms, whether intentional or otherwise (Lamchek 2023; Yadav, Tudela, &amp; Marco-Lajara 2024). With reference to the second question, sections 2 and 3 focused on the capabilities of AI and its impact in various sectors such as health, education and labour market among others. </P>

<P>It is undeniable that AI has transcended and has been transformative in enriching the human experience. Contemporary advancement in AI and its applications in various industries and sectors of the economy is unprecedented. In this section, the impact of AI in the health sector is highlighted. The healthcare sector experienced outstanding AI advancements that are on the brink of revolutionising patient care and outcomes. Advancing medical diagnostics through image recognition and predictive analytics, enabling early disease detection, and personalised treatment plans are some of the developments AI has advanced in the industry. The influence of AI in the health sector was evidenced in 2012 when Geoffrey Hinton developed a neural network at the University of Toronto with two of his students. The system they built was able to mimic humans’ behaviour and function just like the human brain (Apperly &amp; Butterfill 2009:953) where one could feed it with a large amount of data, and through ML it could learn how to identify objects with almost near perfect efficacy. </P>

<P>The working paper of the ILO (International Labour Organization) on the global analysis of jobs that are exposed to AI, specifically Gen-AI, projects that the effects vary across countries based on their income level (Gmyrek, Berg, &amp; Bescond 2023). For example, countries with low-income levels have 0.4% of their total labour force exposed compared to 5.5% of countries in the upper bound. Administrative jobs that are clerical in nature are the most exposed at 24%. Meanwhile, other occupational groups vary between one and four percent (Gmyrek, Berg, &amp; Bescond 2023:110). Along the same lines, the OECD (Organisation for Economic Cooperation and Development) (OECD 2023:42) suggests that AI is changing the jobs landscape and estimates that 42% and 47% of automated jobs (AI adoption) in finance and manufacturing respectively, were motivated by reduction in staff costs. </P>

<P>The quote by Kevin Kelly, co-founder of Wired magazine in his book, The inevitable shares rather interesting views on the future of jobs in the world of AI: ‘Your job won’t be taken by AI, but it will be taken by someone who uses AI’ (Kelly 2016:2). At the core of Kelly’s view is that AI is not here to replace people in their jobs, however, there is a need for adaptability and an urge for self-fulfilment and motivation in one’s career development. Likewise, in an interview with CNBC (2023), Elon Musk, the CEO of X, a leading social media platform previously known as Twitter, has advised people, youths in particular, to embark on a career journey that keeps them captivated and always satisfied. This background in a way opines that there are varied views on the pros and cons of AI evolution. Now that the impact of AI has been discussed alongside the great debate on AI versus humans, in the next section the decussation of AI and academic integrity is presented. </P>
</Sect>

<Sect>
<H4>AI on Academic Integrity Decussation: Plausibility for Evolution of Academic Dishonesty and Academic Misconduct </H4>

<P>There is no universal definition of academic dishonesty, just as there is none for plagiarism. Both the concepts of academic dishonesty and academic misconduct are often used interchangeably, referring to the acts of violating academic integrity (Eaton 2017:272). Alongside plagiarism, the concepts have dominated ethical discourse in IHEs (Hua 2023:160). Eaton (2017:272) continues to suggest that academic dishonesty and misconduct are regarded as forms of an academic integrity antithesis. This affirms what the ICAI (International Centre for Academic Integrity) on the fundamental values of academic integrity stated, viewing both academic dishonesty and misconduct as compromising essential ethical principles and values of trust, honesty, responsibility, fairness, and respect (ICAI 2021:4-10). Tracey Bretag, the Australian scholar, who is popular for, and an avid advocate of academic integrity (cf. Marais 2022:57), borrows from the definition of APFEI (Asia-Pacific Forum on Educational Integrity) of academic integrity as follows: ‘A commitment to key values of honesty, trust, fairness, equity, respect and responsibility, and the translation of these values into action’ (Bretag 2016:28). </P>

<P>The popularity of the term ‘academic integrity’ is credited to Donald McCabe, Rutgers Business School’s Professor of Management and global Business, who is dubbed to be its father and to some known as ‘Dr. Ethics’ (Rutgers Business School 2014). In his summer reflection on liberal education, McCabe (2005:26) writes that ‘it takes a village: Academic dishonesty and educational opportunity [and argues that] if we truly believe in our role as educators, we would do better to view most instances of cheating as educational opportunities.’ There are other prominent scholars who have made and some still are making an immense contribution to the field of academic integrity.2 Enacting behavioural principles and values is the backbone of academic integrity, while the aim is to avoid acts of plagiarism and contract cheating, as Clarke and Lancaster (2006:19) as well as Eaton (2018) put it. In affirmation, Kirk (1996:16) continues to argue that maintaining academic integrity upholds high standards of honesty and rigour in academic research and publishing. </P>

<P>2 Examples are Tracey Bretag and Cath Ellis, both from Australia, Tricia Bertram Gallant from the US, Sarah Elaine Eaton from Canada, Thomas Lancaster from the UK, and Tomáš Foltýnek from the Czech Republic. </P>

<P>On the backdrop of proliferation in AI, the growing importance of protecting and maintaining the practice of a moral or principled code of ethical behaviour, which is the core value of academic integrity is concomitantly taking centre stage (Marais 2022:57). Since the COVID-19 (Coronavirus disease of 2019) pandemic, IHEs across the globe had to, willingly and unwillingly, transition to online assessments (Perkins 2023:1 of 24). Inevitably, this transition was accompanied by concerns on the rising prevalence of academic dishonesty. To comprehend the complexity of academic dishonesty, it is important to note the origins of assessments, which date back as far as the primitive era of the first tests3 in the 18th century (Lang 2013:14). Time-honoured methods such as plagiarism, unauthorised collaboration, and exam cheating remain prevalent challenges. Students have, historically, sought to gain an unfair advantage through these means, raising concerns for educators and institutions globally. The use of crib sheets, copying from peers, and recycling assignments have been persistent issues, emphasising the need for ongoing vigilance in upholding academic integrity (Armstead 2001:10; Stevens 2013:17). </P>

<P>It has been argued that academic integrity tends to place the responsibility of good ethical conduct on the individual, which, according to Fishman (2015:13-16) in her book chapter on Academic integrity as education concept: Concern, and movement in US institutions of higher learning assumes that approach to be problematic. Other scholars point to the Chinese civil service over a millennium when the Chinese emperor in early 2200 BCE administered an elementary form of testing for his officials once every three years as a measure of assessing their fit-for-office (Bushway &amp; Nash 1977:4). In the context of the US, Gregory (2004:59) documents the late 1800s to be the era during which tests were popularised. James McKeen Cattell was credited to have imported brass testing instruments to the US; hence he </P>

<P>According to Fishman (2015:7-8), the historical evolution of academic integrity shaped American approaches to academic integrity and can be traced back to the early conceptions, which were based on British higher education models. These models explicitly addressed ethical and moral lessons through explicit, religious beliefs and mandatory practices. </P>

<P>earned an accolade of being referred to as ‘undisputed dean of American psychology’ (Gregory 2004:60-61). This long and distinct period of history was preceded by the Francis Galton era (1822-1911). Galton, who was reported to have been obsessed with measurements, devised the first battery of tests within the field of psychology in Great Britain. Akin to the introduction of any other system, administering the assessments was not without challenges, and this led to testing environments needing refinements and modifications over centuries. It was during this time that written exams were enacted in the Han dynasty. Some of the challenges relate to unnecessarily demanding assessment practices and invalidated selection tests (Gregory 2004:57). Other challenges include cheating and bribing of examiners, which were popular despite these acts carrying the death penalty as a form of deterrent for both examiner and test taker should it be proven that they are guilty of misconduct (Bushway &amp; Nash 1977:623; Gregory 2004:57-59; Zhang 2017:153). The next section takes a closer view on the contemporary challenges in academic integrity from the context of pre- and post-COVID-19 pandemic. </P>
</Sect>

<Sect>
<H4>Global Perspectives on Academic Integrity </H4>

<P>This section explores the contemporary issues surrounding the management of academic integrity, looking at both the conventional forms and evolving forms of academic dishonesty. Additionally, we examine this through contrasting the global and local perspectives on academic integrity. The focus of this exercise is on selected developed countries contrasted with emerging nations like Brazil, India, and South Africa. The aim of this comparative analysis is to shed light on the complex consequences of academic integrity issues in various learning environments and educational landscapes. </P>

<P>The challenges in maintaining academic integrity are on the one hand precipitated by perspectives on the development of academic integrity measures, while on the other there is the role played by technology. On the former, Fishman (2015:8) attributes and relates challenges to the narrow view of academic integrity. Authors go further to suggest that there needs to be a paradigm shift away from students as possible threats to academic integrity, towards focusing on educators and researchers, practices, and entrenched institutional ethical cultures. The shortcoming of this one-dimensional perspective rests on placing the responsibility of managing academic integrity on individual choices, hence the inefficiencies. Therefore, a multi-dimensional perspective of responsibility of managing academic integrity, a principle which this chapter is underpinned on, remains the effective and responsible way of managing academic integrity. </P>

<P>Developed nations like the US, UK (United Kingdom), Italy, and Australia often face challenges associated with technology-driven academic misconduct. For the longest time, academic integrity has been the anchor behind a fair, valid, and reliable education system. The assurance of fair and honest representation of one’s work in assessments underpins the basis for principles of academic integrity. Sadler (1989:119), in the theory of formative assessment, suggests that there are multiple criteria applied in judging the quality of students’ assessments. It is the ‘quality’ in this assessment that is a beacon of hope when it comes to protecting academic integrity. However, owing to evolutions in the educational landscape, the course or journey to manage and maintain academic integrity, is by extension riddled with allied challenges. </P>
</Sect>

<Sect>
<H4>Current Challenges in Academic Integrity </H4>

<P>Using AI tools does not automatically constitute academic dishonesty. Eaton (2023:3 of 10) uses the six tenets of post-plagiarism by reflecting writing in the age of AI and argues that attempts to detect where the human ends and where the AI begins in this era is pointless. Examining academic integrity on a global scale reveals a spectrum of practices and attitudes. The proliferation of essay mills and the use of advanced cheating technologies pose threats to the integrity of assessment processes. On the other hand, developing countries such as Brazil, India, and South Africa grapple with issues rooted in resource constraints and varying levels of academic support. The implications for educational institutions in these diverse contexts are profound, necessitating tailored strategies to address the unique challenges each faces. </P>

<P>Academic integrity literature provides valuable insights into the implications and challenges in academic integrity, both in developed and developing nations. In the US, Canada, and the UK, studies highlight the prevalence of contract cheating and the need for rigorous academic policies (Eaton 2018; Eaton, Rothschuh, Fernández Conde, Guglielmin, Otoo, Wilson, &amp; Burns 2018:2; Eaton 2017:271; Lancaster 2020:1-2 of 14; Stone 2023:357). Perkins (2023:2 of 24) suggests that in Italy and Australia the focus has shifted towards the impact of cultural attitudes on academic honesty and considerations of academic integrity in the era of AI LLM models (Ellis, Van Haeringen, Harper, Bretag, Zucker, McBride, Rozenberg, Newton, &amp; Saddiqui 2020:454; Bretag 2016:8-10). Conversely, in India and South Africa, the reviewed research underscores the role of socioeconomic factors in influencing academic misconduct (Marais 2022; Mwamwenda 2006; Vandana &amp; Nagaveni 2019). </P>

<P>Key implications gleaned from the literature include the urgent need for standardised global frameworks that acknowledge the contextual differences between developed and developing nations. Institutions must not only adopt proactive measures to deter academic misconduct but also foster a culture of academic honesty through educational initiatives. As we navigate the post-COVID educational landscape, understanding these implications becomes paramount in crafting effective strategies to safeguard academic integrity worldwide. </P>
</Sect>

<Sect>
<H4>The Costs of the Rise in AI’s Large Language Models Authoring Tools on Academic Integrity </H4>

<P>The advent of technology has ushered in a new era of challenges for academic integrity. While traditional forms of cheating persist, there is a notable evolution in the methods employed. According to Epstein and Hertzmann (2023:1110), the capability and influence of Gen-AI tools lead to reimagining creativity, as such, posing a need to understand the impact as it cuts across many sectors of the society. The HE landscape, and by extension, academic integrity is no exception. For example, one of the concerns raised against LLMs is their ability to generate text that may be difficult for academics to detect that it is machine-generated (Perkins 2023:8 of 24; Wahle, Ruas, Kirstein, &amp; Gipp 2022:952). Another challenge is the difficulty with which to establish relevant policies if the use of LLMs as a tool for student cognitive offloading is considered. LLMs are transformer-based ML models characterised by language-based activities (Vaswani, Shazeer, Parmar, Uszkoreit, Jones, Gomez, Kaiser, &amp; Polosukhin 2017). In this section, the key question under exploration is, ‘To what extent does the rise in AI authoring tools compromise academic integrity?’ </P>

<P>The evolution of AI academic authoring tools has its own ‘the good and the bad’ story. Much as there are benefits – intended or unintended – there are also associated consequences or costs. These costs are propelling the debates and arguments for and against embracing AI. Grappling with the challenges brought by the rise in AI authoring tools, universities and other IHEs around the world have recently developed (and are still developing, whilst others are yet to develop) their policy stances on the usage of AI authoring tools. On the backdrop of the downside of the rise in AI authoring tools, the likes of the AWG (Australian Writers’ Guild) have in its response to this explosion, released a statement where the association vehemently affirms its stringent stance against the unregulated use of AI within Australia’s creative industries (AWG 2023). Similarly, the WGA (Writers Guild of America) has tabled its concerns and issues they have on AI within the US screen industry, and this went as far as the protracted protests. The WGA ultimately won critical contract protections, although this victory was not enough, according to Winter (2023). Despite the critics, at the core of the settlement, it was agreed that AI would not be used to weaken writers’ credits and compensation. In another prominent example of concerns against AI, the technology reporter of the BBC (British Broadcasting Corporation), Shiona McCallum wrote that ‘ChatGPT [was] banned in Italy over privacy concerns’ (McCallum 2023). Unexpectedly and ironically, the headline would coincide with April Fools’ Day, and understandably some would wonder if this was a gimmick. However, it was found to be a true story. As reported in the article, the data protection authority in Italy was grimly concerned about privacy of the data on the chatbot’s model (McCallum 2023). </P>

<P>To borrow from the economics literature, there is a popular phrase about the hidden or opportunity costs that states: ‘There ain’t no such thing as a free lunch’, to which William Safire (1993) argues that it was popularised by the free market economist Milton Friedman in 1975 (Friedman 1975). This phrase puts meaning and understanding to the existence and impact of costs in any transaction. Brue and McConnell (2014:3) put it succinctly as they write that the phrase is ‘at the core of economics.’ It is undeniable that much as there are benefits brought by AI authoring tools, there are also concomitant costs. The impact of the LLMs on academic integrity is complex and is continuously evolving. This is partly attributed to unremitting development and deployment of new tools. For example, Poola (2023:16) states that with the development of new technologies and advancement in AI, the efficiency and effectiveness of LLMs such as ChatGPT is likely to be impacted. This is more of a paradoxical situation with positive and negative potential consequences and will be discussed in the next sections. </P>
</Sect>

<Sect>
<H4>Positive Benefits of AI Authoring Tools </H4>

<P>The rise of AI authoring tools represents a transformative wave that is reshaping the landscape of academic inquiry across diverse disciplines (Currie 2023:723). Notwithstanding the negative consequences of AI authoring tools which are discussed in the next section, the positive impact of AI on academic integrity leans towards supporting and enhancing the individual’s learning experience. The focus of the next two sections is to demonstrate how AI can be positively used to support learning. This is followed by a discussion on how AI systems can collaborate with human beings. </P>

<Sect>
<H5>AI Support for Teaching and Learning </H5>

<P>Much of AI proliferation and transformation has simplified the job of educators, making it more efficient and effective (Pokrivcakova 2019). The application of AI-related systems and tools in education through an integration of VR (virtual reality) in mobile devices has also transformed the way in which these devices are used for learning. The added benefit of this transition is personalised and improved learning experiences for students. Robotic teaching, AWT, APT, reading and pacing tools, as well as automatic grading are some of the pioneering examples as to how AI has permeated the education space (Neha 2020:305). Ordinarily, AI tools and systems, especially AI authoring tools are imitating human behaviour, in particular human intelligence. Kaartemo and Helkkula (2018:212) hold a similar view as they state that AI machines and systems carbon copy humans’ cognitive abilities, and their advantage is that their ‘intelligence’ is programmable to be able to execute quite complex tasks. For researchers, LLMs offer a variety of potential benefits. For example, they amass an influential potential to be valuable tools for organising literature reviews as they can aid in finding relevant sources (e.g., Litmaps, 
<Link>perplexity.ai</Link>
, and Connected Papers). In addition, these tools also offer the benefit of being able to understand complex concepts and stimulate the user’s creativity process through idea-generation. </P>

<P>The launch of ChatGPT has catalysed a rapid rise in open AI technologies, mainly Gen-AI (Currie 2023:719). These capabilities have revolutionised and redefined teaching, learning, and assessment practices in the HE space. Yeo (2023) puts it succinctly that ChatGPT is debatably the most erudite free-to-use AI authoring tool that affords the benefit of being able to generate large amounts of text or write-up with less effort required of the author. Hellman (2019) points to augmented writing, referring to academic writing tools that allow authors to basically pose a question or title, and the outcome is generated text or content. This process of simply posing a question and getting a response in the form of generated text relates to prompt engineering. Additionally, there are other AI tools with text completion features and the capability of writing a whole essay (Yeo 2023). </P>

<P>The sheer capability of Gen-AI tools and chatbots is founded on GPT 3 (generative pretrained transformer 3) models. AI writing tools and digital writing assistants like ChatGPT, Google Bard, and Claude POE have the capability to automate or aid with ideas during the write-up process. These tools use ML algorithms to generate human-like text, responding to the users’ text-based prompts. In line with the GIGO (garbage in, garbage out) metaphor, Poola (2023:16) cautions that the quality of ML algorithm responses is equally dependent on the quality of the prompts. The same sentiments are shared by Bouchard (2022) on the efficacy of ChatGPT who argues that its efficiency relies on the inputted prompt. With advancements in AI and the introduction of new tools, prompt engineering is regarded as the most sought-after skill. In support of this claim, Meskó (2023:2 of 6) narrates that one of the emergent skills requisites of medical professionals is that of prompt engineering. The strength of effective prompting models lies in DL capabilities, which are built on ML and NLP capabilities (Fan, Chen, Li, Sun, Feng, Hassanin, &amp; Sareh 2021:3955). </P>

<P>To protect the academic integrity of assessments, especially at a postgraduate level, IHEs have turned their focus to plagiarism and AI-detection mechanisms. By doing this, they have considered increasing concerns and criticisms over threats to creativity and critical thinking, security, and ethical issues, and most importantly an overreliance on AI, specifically the use of AI authoring tools. Regardless of these developments, the effectiveness, ethical implications, and challenges attending the use of these AI technologies in safeguarding academic honesty remain ambiguous. The measures that can be taken to strike a balance between the usage of AI authoring tools and maintaining academic integrity will be discussed below. In the next section, the possibility of human and AI co-creation is explored. </P>
</Sect>

<Sect>
<H5>Potential for AI Systems and Machines Facilitating Co-Creation With Humans </H5>

<P>Drawing from the earlier discussion above on the debate between AI and human intelligence, and the former standing to replace humans, the question that is explored in this section is whether co-creation between AI systems and human beings is possible. AI is known to have revolutionised the field of education, as it is known to have been applied in disciplines like physics, finance, mathematics, and English (Sharma, Kawachi, &amp; Bozkurt 2019:1). </P>

<P>The distinctive key feature of AI lies in its intimate intelligence (Neha 2020:305-306), which previously has been exclusive to only humans until the prevalence of AI systems (Chassignol, Khoroshavin, Klimova, &amp; Bilyatdinova 2018:16-17; Coppin 2004:45; Whitby 2008:28). </P>

<P>AI is considered to be one of the most debated subjects in the contemporary era (Korteling, Van de Boer-Visschedijk, Blankendaal, Boonekamp, &amp; Eikelboom 2021) and its capabilities are undoubtedly beyond belief, thereby intensifying the discourse on the comparison between human and AI abilities. In relation to this debate, there have been reports on AI replacing humans (Neha 2020), thus ultimately/possibly causing human extinction. Despite these predictions, the literature on AI versus humans (Dong 2023; Mu 2023; Korteling et al. 2021; Venugopal &amp; Mamatha 2023) suggests that this displacement is not conclusive, however, terms such as ‘possible,’ ‘potential,’ and ‘might’ are mentioned, thus signalling the existence of such likelihood of replacement. Mboti (2023:2) reflects on replacement perceptions by asking whether this is all in the name of hype. The author concludes that the sentience is rather mistaken, pointing to the fundamental misunderstanding of what it means to be human (Mboti 2023:2). This view corroborates Dong’s point on AI capabilities in education that as much as it may sound plausible that AI stands to replace educators (Dong 2023), the AI’s misgivings are that it does not possess similar emotions to those of human beings. Therefore, the possibility is co-creation where humans would add these emotions to enhance the performance of AI systems in the future. </P>

<P>Russell and Norvig (2021:1005) point out that as much as AI’s superintelligence is potentially on the horizon, advancements in technology, in particular AI cannot be absolute exponential growth, thus exhibiting the S-shape. This mere shortcoming of AI suggests that coexistence with humans is still needed for it to be more perfected. This era of synchronicity may lead to transhumanism, a term which was initially predicted by Moravec in 1988 in his book titled Mind children (Moravec 1988). However, Julian Huxley, a biologist and philosopher is credited to have first used it in his 1957 academic work on directed improvements in human evolution (Huxley 1957:13). The tenets of transhumanism can be traced to the work of the English professor, novelist, and non-fiction writer Robert Ettinger4 and Fereidoun Esfandiary, who are touted as the ‘early pioneers’ (Kamiya 2023). Moravec is a renowned specialist for his work on robotics (cf. Moravec 1988). Thomas Fuchs in his transhumanism critique chapter titled Beyond the human states the following: ‘While biological humanity is slowly dying out, humans will ensure the continued existence of their minds through computer simulation’ (Fuchs 2021:49). Fuchs’ view illustrates the possibility of human and machine merging and collaboration, which is the essence of co-creation. </P>

<P>With the talks of human extinction possibilities, AI protagonist leaders in the form of hundreds (more than 350) of AI experts and professors pledged their signatures on the Statement on AI Risk (Centre for AI Safety 2023). The statement was preceded by an open letter cautioning against the risks of AI. Roose (2023) reports that those who signed varied from top executives to the key role players of the leading AI corporates which include the CEOs of OpenAI, Google, and Anthropic. With these high profile individuals, this indicates the extent to which the AI threat is creating uncertainty on the future. The signatory pledge was driven by the Centre for AI Safety, which is a non-profit establishment (Centre for AI Safety 2023). These concerns delve into the responsible AI domain whereby ethical use and trust in AI are of paramount importance. </P>

<P>Wu, Ji, Yu, Zeng, Wu, and Shidujaman (2021), the proponents of AI co-creation, suggest an AI creativity and human-AI co-creation model after analysing more than 1,600 application cases of AI across 45 areas. By means of this model, the authors have coined and introduced the ‘AI creativity’ concept. The argument for their model is that it expounds the creative process in the AI era and that there are new opportunities and possibilities for collaboration with humans, enabled by AI. </P>

<P>4 Ettinger is the well-known father of cryonics, a process of preserving the dead in frozen liquid nitrogen, with the goal of revival later. Amongst his published work, the two notable books related to his work on cryonics are The prospect of immortality (Ettinger 1965) and Man into superman: The startling potential of human evolution – and how to be part of it (Ettinger 2005). </P>

<P>Within the IHE space, Robayo-Pinzon, Rojas-Berrio, Rincon-Novoa, and Ramirez-Barrera (2023:17 of 17) reflect on the relevance of AI creativity and value the co-creation process stating that this also garners support from students. They propose that more conceptual studies are necessary to understand and leverage the associated benefits. Other parts of AI and human interaction literature refer to HCI (human-computer interaction) to denote the merger and collaboration between humankind and machines. For example, Kurosu (2021:v) in his HCI theory, methods, and tools book comprised of conference papers reflects that the HCI field is gaining evolving attention and importance from both the scientific and industrial communities. Zaphiris and Ioannou (2021) further elaborate that in the 8th international conference held in 2021, over 5,000 individuals from various fields across 81 countries – academic sectors, research institutions, industry players, and public (government) sectors – submitted 1,276 papers and 241 posters. </P>

<P>Whereas AI is here to stay, it needs the human element to assist it, especially to develop and demonstrate empathy. This view resonates well with Dong’s perspective on AI and emotions as stated earlier (Dong 2023). The other important consideration on human and AI collaboration is on understanding issues of explainability and interpretability. These two principles rest within XAI (explainable artificial intelligence), which is a branch of AI alongside Gen-AI and responsible AI, all explored further in this chapter on AI and big data in education. Therefore, as our everyday lives and activities are influenced by AI and we are slowly moving to the digital space, away from the physical world, collaboration rather than contestation between humans and AI is likely to influence the future, both of AI and humans. It is crucial that ethical boundaries in this development remain uncompromised above all. </P>
</Sect>

<Sect>
<H5>The Negative Impacts of AI Authoring Tools </H5>

<P>The global transformative power of the AI landscape across the various sectors of the economy cannot be ignored. The immense potential and transformative power of AI are shaping the future of education (Khan &amp; Vivek 2022). The concomitant impact of AI’s transformation points to influencing the way we teach, learn, and do research (Takyar 2022). Gen-AI LLMs such as ChatGPT, Google Bard, Gemini, DALL-E, and Claude are arguably pioneers in this transformation (cf. Figure 2.4), and this is largely attributed to their text generation capability which is perceived to be better than that of humans. These models have gone through various waves of AI development, of which the first wave was more on rule-based technologies, followed by the second wave which was characterised by the start of big data, ML, and Gen-AI. The last wave is sluggishly transcending from the era of cognitive AI into AGI (artificial general intelligence). </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_23.jpg"/>

<Caption>
<P>Figure 2.4: Growing capabilities of Gen-AI LLMs. (Source: Voss &amp; Jovanovic 2023) </P>
</Caption>
</Figure>

<P>Given the prominence that AI, and by extension LLMs, have gained over the last few years, contract cheating has been one of the major unethical behaviours that potentially compromise academic integrity within the HE space. With this form of academic dishonesty, students engage in transactional relationships in return for tailored assignments (Medway, Roper, &amp; Gillooly 2018:413). There is a growing body of academic literature discussing students’ use of LLMs in their assessments and academic integrity, for example Eaton et al. (2018), Perkins (2023), and Stone (2023). The rise in essay mills and the easy access of online resources present new ethical dilemmas (Medway et al. 2018). This, therefore, calls for a new way of thinking regarding policy and guidelines, assessment methods and practices, as well as capacity development. The following two sections explore the concerns raised against the impact of AI authoring tools. </P>
</Sect>

<Sect>
<H5>Students’ AI Dependency and Plagiarism </H5>

<P>The Covid-19 era (and afterwards) has evoked a massive and rapid transition to online learning by IHEs across the world. This inspired more and more use of digital technology tools to enhance teaching and learning as well as research activities. Just as educators are thought to be playing a crucial role in how educational technology tools are adopted and adapted to enhance quality in learning experiences and assessment practices (Fajet, Bello, Leftwich, Mesler, &amp; Shaver 2005:717), more and more students are easily accessing these tools, whether in free-to-use or paid form. The cause for concern is that these students are increasingly becoming too dependent on AI, thus transferring the burden of responsibility to perform one’s work to technology (AI). The concomitant alarms to this dependency among others are increasing incidents of plagiarism and other forms of academic dishonesty as described by Medway et al. (2018:393). </P>

<P>The blurred lines between collaboration and plagiarism in digital environments add layers of complexity to the landscape. As education increasingly embraces online platforms, IHEs ought to grapple with how to adapt their policies to address these emerging challenges effectively. Yeo (2023:2 of 18) points to the issues of equity and access which are being associated with the AI authoring tools proliferation. The paradox in these costs is that the outcome – intended or unintended (Motala 2018:15) – may be costly. </P>

<P>With electronic journals available in their plenitude, enhanced by the rise in AI tools, plagiarism can be supported inadvertently as students can easily access papers online, use services of writing companies, thus enabling ghost-writing to thrive (Walker 2010:43). Signifying the students’ dependency on AI, Perkins (2023:5 of 24) warns of inaccurate and falsified reference lists and citations, thus recommending the need to doublecheck and verify their veracity. According to Zhang (2020), most of AI AWE (automated writing evaluation) systems for providing students’ feedback are equally gaining prominence in research writing space and are amassing clear benefits over human-geared reviews and assessments. AWEs like other AI authoring tools and systems such as DWAs (digital writing assistants), AAGs (automated article generators), and APTs (Roe &amp; Perkins 2022) were initially geared for commercial enterprises for SEO (search engine optimisation) but were consequently adapted to support student learning (Perkins 2023:3 of 24). </P>

<P>While transition to online learning has brought some benefits, undue consequences, whether intended or otherwise have surfaced. On the downside, Stone (2023:358) underscores this point by stating that this transition has opened doors for more breaches and compromising of academic integrity, evidenced by an increasing reporting of acts of academic dishonesty. With LLMs within reach, students can easily put their reliance on AI for their thinking – argumentative and critical thinking. The usage of the AI authoring and LLM tools if not properly managed and regulated may increase chances of academic dishonesty, like plagiarism. </P>

<P>Plagiarism is a longstanding global ethical challenge troubling IHEs, given the disruptive behaviour it possesses on teaching and learning and knowledge transmission (Eaton 2017:271; 2023:2 of 10; Jones &amp; Sheridan 2015:712; Thomas &amp; De Bruin 2015:1-2 of 3; Walker 2010:42). Perkins, Gezgin, and Gordon (2019:5) define plagiarism as an act of misrepresentation of efforts carried out by the original authors. Another interesting definition of plagiarism is that of Sutherland (2008:11) who regards plagiarism as an act of using another person’s intellectual work by kidnapping their ideas without relevant sources of reference. Although some students plead ignorance or that they were unintentional in committing it (Stone 2023:258), academic integrity is threatened, and by extension IHEs, academic staff, and students themselves suffer reputational damage (Currie 2023:720), with IHEs’ programmes and qualifications under scrutiny (Perkins 2023:1 of 24; Stone 2023:358-359). This, according to Pecorari (2008:44; cf. Mahabeer &amp; Pirtheepal 2019:2) may potentially lead to students’ work and intelligibility being questioned and rejected. Plagiarism is assumed to be taking the edge off one’s thinking, creative abilities, and originality (Jones &amp; Sheridan 2015:715). </P>
</Sect>

<Sect>
<H5>Reduced Human Interaction and Communication Threat </H5>

<P>The non-debatable capacity of AI systems leans towards the ability of these systems to enhance individual experiences and to personalise digital services (Leslie 2019:6). Although these capabilities hold great potential, risks are nonetheless inherent. With more and more excessive automation, the dependency on AI systems and tools increases, thus threatening the need for human-to-human interaction. From a research perspective, a communication gap between educators and students is also a possibility, and this may well polarise social relationships. Leslie (2019:6) argues that ‘well-ordered and cohesive societies are built on relations of trust, empathy, and mutual understanding.’ Therefore, as AI systems become more and more available, it is vital for human beings to preserve these relations. Trilling and Fadel (2009:44-45) identify the following skills as key learning and innovation as well as life and career skills needed for the future: </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Critical thinking and problem solving; </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>creativity and innovation; </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>collaboration and communication; </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>flexibility and adaptability; </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>initiative and self-direction; </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>social and cross-cultural skills; and </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>leadership and responsibility. </LBody>
</LI>
</L>

<P>These skills remain highly relevant in a contemporary and dynamic job market and education characterised by AI revolution (Almazroa &amp; Alotaibi 2023; Timenko 2021). Furthermore, these skills are considered requisite competencies in a publication by the WEF (World Economic Forum) reflecting 21st-century skills that every student needs (WEF 2016). The report goes further to state that SEL (social and emotional learning) is a crucial driver to the development of these skills and possessing them will enable students to be successful in an ever-evolving digital economy (WEF 2016). Therefore, with an over-reliance on technology and by extension, AI systems and tools, the development of these important skills is at stake, just like the economic inclusion and participation of students of the future. </P>
</Sect>

<Sect>
<H5>Striking a Balance Between AI Usage and Maintaining Academic Integrity </H5>

<P>Notwithstanding these revolutionary benefits amid the IHEs’ endeavour to embrace and manage the abundance of AI technologies and related tools, preserving academic integrity remains a paramount ethical issue. Despite the potential prospects, this proliferation comes with associated risks and challenges. One of the challenges relates to the balance between creativity (ML/automation) and integrity (human interaction) in education. This is where ethics intersects creativity and advancements in technology. </P>

<P>This implies that AI advancement has concomitant significant ethical concerns, which if not well-managed or taken care of, will erode, if not compromise, the integrity of the field – from assessments, teaching and learning methods, a governance process and policies, and most importantly the knowledge creation process. With this context in mind, the nuanced AI-driven ethical challenges on academic integrity are explored here, emphasising the need for a thoughtful and principled approach. </P>

<P>While there is a need to embrace AI given its benefits and the paradigm shift it has caused, there is a need to also strike a balance in an endeavour to maintain academic integrity. This should be done without suffocating the technology. Stone (2023:357), in her paper on student perceptions of academic integrity, notes that academic integrity is increasingly becoming an important aspect for IHEs; however, maintaining it has equally become a serious issue. Without a doubt, advancements in AI have contributed to this struggle. The bottom line is that there should be a balance for safe usage. AI governance, transparency and accountability, content ownership, fact-checking, and AI ethical usage as some of the principles that form the cornerstone of this balance are discussed below. </P>
</Sect>
</Sect>

<Sect>
<H4>Future Trends: Evolution of AI and Related Challenges </H4>

<P>The future of AI and its continuous advancements remain uncertain. This evolution is dynamic in nature and will undoubtedly continue to bring many surprises. In turn, the future of academic integrity stands at a crossroads, marked by the transformative influence of AI and the challenges it introduces. As we propel into an era characterised by reduced human interaction, communication threats, students’ increasing dependency on AI, and ethical concerns stemming from heightened academic dishonesty, it becomes imperative to recalibrate our strategies and fortify the foundations of academic integrity. </P>

<P>The reduction in human interaction, exacerbated by the widespread adoption of digital platforms and automated assessment tools, poses a communication threat that widens the gap between educators and students. The traditional channels for guidance and mentorship may dwindle, necessitating innovative approaches to foster meaningful connections. Educators and institutions must proactively leverage technology to bridge this gap, establishing clear lines of communication, virtual support systems, and avenues for students to seek guidance and feedback, especially on clarifying the difference between plagiarism and poor referencing techniques (Stone 2023:372). </P>

<P>The growing dependency on AI among students raises concerns about diminishing capacities for critical thinking. As AI tools streamline processes and offer ready-made solutions, there is a risk of stifling the development of essential analytical and problem-solving skills. The future demands a recalibration of educational approaches to strike a balance between leveraging AI for efficiency and preserving the space for students to cultivate critical thinking. Encouraging inquiry-based learning, complex problem-solving, and projects that require creative thought will be crucial in nurturing students’ intellectual independence. In attempting to overcome transparency issues in the currently available LLMs, alternative open-source LLMs known as OPTs (open pre-trained transformers) are being developed as they boast the advantage of a text generation ability and are more open to scrutiny (Perkins 2023:7 of 24). </P>
</Sect>

<Sect>
<H4>Conclusion </H4>

<P>In the face of changing educational environments and advancements in AI, the survival of academic integrity depends on the current and future generation’s capacity to adjust, create, and preserve moral principles instead of subverting them. To fully utilise AI’s benefits while preserving the values that guide academic success, a deliberate effort is needed. There is a need to create an educational future that is both technologically advanced and morally sound. This involves tackling issues presented by a worryingly mounting students’ dependency on AI, less human interaction, communication, and ethical gaps. The journey to maintaining academic integrity in the AI age remains a dynamic call that requires ongoing self-examination, adjustment, and a resolute dedication to the values that constitute the core of intellectual endeavours. Eaton (2023) thus argues that we need to be talking about post-plagiarism and reflects on ethics and integrity in the era of AI. </P>

<P>Equally so, the ethical threat posed by increased academic dishonesty in the digital age demands a proactive and multifaceted response. IHEs must not only employ advanced technological tools for plagiarism detection but also increase their investment in comprehensive educational programmes that instil a strong sense of academic ethical culture. Fostering a culture of integrity, emphasising the value of original thought, and equipping staff – students, educators, and management – with the skills to navigate the ethical challenges posed by AI are essential steps for mitigating the possible threat against academic integrity. </P>
</Sect>

<Sect>
<H4>References </H4>

<P>Almazroa, H. &amp; Alotaibi, W. 2023. Teaching 21st century skills: Understanding the depth and width of the challenges to shape proactive teacher education programmes. Sustainability 15(9). 
<Link>7365. 25 pages. https://doi.org/10.3390/su15097365 </Link>
</P>

<P>Apperly, IA. &amp; Butterfill, SA. 2009. Do humans have two systems to track beliefs and belief-like states? Psychological Review 116(4):953
<Link>970. https://doi.org/10.1037/a0016923 </Link>
</P>

<P>Armstead, PK. 2001. What factors affect cheating in secondary school and why? PhD thesis, Department of Psychology, University of Plymouth, England. </P>

<P>Arrieta, AB., Díaz-Rodríguez, N., Del Ser, J., Bennetot, A., Tabik, S., Barbado, A., García, S., Gil-López, S., Molina, D., Benjamins, R., &amp; Chatila, R. 2020. Explainable artificial intelligence (XAI): Concepts, taxonomies, opportunities and challenges toward responsible AI. Information fusion
<Link> 58:82-115. https://doi. </Link>

<Link>org/10.1016/j.inffus.2019.12.012 </Link>
</P>

<P>AWG (Australian Writers’ Guild). 2023. Australian Writers’ Guild rejects unregulated AI in the creative sector. 28 September 2023. 
<Link>Available at: https://www.awg.com.au/News-Details?newsId=8 </Link>

<Link>93&amp;title=Australian-Writers-Guild-rejects-unregulated-AI-in</Link>

<Link>the-creative-sector</Link>
. (Accessed on 25 December 2023). </P>

<P>Beduschi, A. 2024. Synthetic data protection: Towards a paradigm change in data regulation? Big Data &amp; Society
<Link> 11(1):20-53. https://doi. </Link>

<Link>org/10.1177/20539517241231277 </Link>
</P>

<P>Bouchard, L. 2022. Prompting explained: How to talk to ChatGPT. 12 
<Link>December 2022. Available at: https://www.louisbouchard.ai/ </Link>

<Link>prompting-explained/</Link>
. (Accessed on 2 January 2024). </P>

<P>Bretag, T. 2016. Handbook of academic integrity. Singapore: Springer. 
<Link>https://doi.org/10.1007/978-981-287-098-8 </Link>
</P>

<P>Brue, S. &amp; McConnell, C. 2014. Essentials of economics. New York: McGraw Hill. </P>

<P>Bushway, A. &amp; Nash, WR. 1977. School cheating behaviour. Review of Educational Research
<Link> 47(4):623-632. https://doi. </Link>

<Link>org/10.3102/00346543047004623 </Link>
</P>

<P>Carter, J., Sandall, J., Shennan, AH., &amp; Tribe, RM. 2019. Mobile phone apps for clinical decision support in pregnancy: A scoping review. BMC Medical Informatics and Decision Making
<Link> 19(1):1-13. https://doi. </Link>

<Link>org/10.1186/s12911-019-0954-1 </Link>
</P>

<P>Carvalho, L. &amp; Yeoman, P. 2021. Performativity of materials in learning: The learning-whole in action. Journal of New Approaches in Educational Research
<Link> 10(1):28-42. https://doi.org/10.7821/ </Link>

<Link>naer.2021.1.627 </Link>
</P>

<P>Centre for AI Safety. 2023. Statement on AI risk: AI experts and public figures express their concern about AI risk. Centre for AI Safety. 
<Link>Available at: https://www.safe.ai/statement-on-ai-risk. </Link>
(Accessed on 7 January 2024). </P>

<P>Chaffey, D. &amp; Ellis-Chadwick, F. 2019. Digital marketing. Harlow: Pearson. </P>

<P>Chassignol, M., Khoroshavin, A., Klimova, A., &amp; Bilyatdinova, A. 2018. Artificial intelligence trends in education: A narrative overview. Procedia Computer Science 
<Link>136:16-24. https://doi.org/10.1016/j. </Link>

<Link>procs.2018.08.233 </Link>
</P>

<P>Clarke, R. &amp; Lancaster, T. 2006. Eliminating the successor to plagiarism? Identifying the usage of contract cheating sites. Proceedings of 2nd international plagiarism conference, June 2006, 19-21. </P>

<P>CNBC. 2023. Tesla CEO Elon Musk discusses the implications of A.I. on his children’s future in the workforce. 16 May 2023. Video. Available 
<Link>at: https://www.cnbc.com/video/2023/05/16/tesla-ceo-elon</Link>

<Link>musk-discusses-the-implications-of-a-i-on-the-future-of</Link>
</P>

<P>
<Link>the-workforce.html</Link>
. (Accessed on 3 January 2024). </P>

<P>Coppin, B. 2004. Artificial intelligence illuminated. Boston: Jones &amp; Bartlett. </P>

<P>Coutinho, L. 2022. The power of the universe: How to make it work for you. The Free Press Journal
<Link>. 9 April 2022. Available at: https:// </Link>

<Link>www.freepressjournal.in/weekend/the-power-of-the-universe</Link>

<Link>how-to-make-it-work-for-you</Link>
. (Accessed on 9 December 2023). </P>

<P>Currie, GM. 2023. Academic integrity and artificial intelligence: Is ChatGPT hype, hero or heresy? Seminars in Nuclear Medicine 53(5):719-730. 
<Link>https://doi.org/10.1053/j.semnuclmed.2023.04.008 </Link>
</P>

<P>De Bruin, JS., Schuh, C., Seeling, W., Luger, E., Gall, M., Hütterer, E., Kornek, G., Ludvik, B., Hoppichler, F., &amp; Schindler, K. 2018. Assessing the feasibility of a mobile health-supported clinical decision support system for nutritional triage in oncology outpatients using Arden Syntax. Artificial Intelligence in Medicine 
<Link>92:34-42. https://doi.org/10.1016/j.artmed.2015.10.001 </Link>
</P>

<P>Delipetrev, B., Tsinaraki, C., &amp; Kostic, U. 2020. Historical evolution of artificial intelligence. Luxembourg: Publications Office of the European Union. </P>

<P>Dong, Z. 2023. Research on the impact of artificial intelligence on the development of education. Proceedings of the 2nd International Conference on Interdisciplinary Humanities and Communication Studies. </P>

<P>Eaton, SE. 2017. Comparative analysis of institutional policy definitions of plagiarism: A Pan-Canadian university study. Interchange 
<Link>48:271-281. https://doi.org/10.1007/s10780-017-9300-7 </Link>
</P>

<P>Eaton, SE. 2018. Contract cheating: A Canadian perspective. BMC. 24 
<Link>July 2018. Available at: http://blogs.biomedcentral.com/ </Link>

<Link>bmcblog/2018/07/24/contract-cheating-a-canadian</Link>

<Link>perspective/</Link>
. (Accessed on 29 November 2023). </P>

<P>Eaton, SE. 2023. Postplagiarism: Transdisciplinary ethics and integrity in the age of artificial intelligence and neurotechnology. International Journal for Educational Integrity 19(1). 23. 10 pages. 
<Link>https://doi.org/10.1007/s40979-023-00144-1 </Link>
</P>

<P>Eaton, SE., Rothschuh, S., Fernández Conde, C., Guglielmin, M., Otoo, BK., Wilson, J., &amp; Burns, I. 2018. Understanding faculty perceptions and approaches to academic integrity in a Canadian school of education. Available at:
<Link>https://files.eric.ed.gov/fulltext/ </Link>

<Link>ED581141.pdf</Link>
. (Accessed on 4 January 2024). </P>

<P>Ellis, C., Van Haeringen, K., Harper, R., Bretag, T., Zucker, I., McBride, S., Rozenberg, P., Newton, P., &amp; Saddiqui, S. 2020. Does authentic assessment assure academic integrity? Evidence from contract cheating data. Higher Education Research &amp; Development 
<Link>39(3):454-469. https://doi.org/10.1080/07294360.2019.1680956 </Link>
</P>

<P>Epstein, Z. &amp; Hertzmann, A. 2023. Investigators of human creativity. In Akten, M., Farid, H., Fjeld, J., Frank, MR., Groh, M., Herman, L., Leach, N., &amp; Mahari, R. (Eds.): Art and the science of generative AI. Science
<Link> 380(6650):1110-1111. https://doi.org/10.1126/science. </Link>

<Link>adh4451 </Link>
</P>

<P>Eren, A., Subasi, A., &amp; Coskun, O. 2008. A decision support system for telemedicine through the mobile telecommunications platform. Journal of Medical Systems
<Link> 32(1):31-35. https://doi.org/10.1007/ </Link>

<Link>s10916-007-9104-x </Link>
</P>

<P>Erguzel, TT. &amp; Ozekes, S. 2014. Artificial intelligence approaches in psychiatric disorders. The Journal of Neurobehavioral Studies 1:52
<Link>53. https://doi.org/10.5455/JNBS.1405259279 </Link>
</P>

<P>Ettinger, RCW. 1965. The prospect of immortality. London: Sidgwick and Jackson. </P>

<P>Ettinger, RCW. 2005. Man in superman: The startling potential of human evolution – and how to be part of it. New York: Routledge. </P>

<P>Fajet, W., Bello, M., Leftwich, SA., Mesler, JL., &amp; Shaver, AN. 2005. Pre-service teachers’ perceptions in beginning education classes. Journal of Teaching Education
<Link> 21(6):717-727. https://doi. </Link>

<Link>org/10.1016/j.tate.2005.05.002 </Link>
</P>

<P>Fan, W., Chen, Y., Li, J., Sun, Y., Feng, J., Hassanin, H., &amp; Sareh, P. 2021. Machine learning applied to the design and inspection of reinforced concrete bridges: Resilient methods and emerging applications. Structures
<Link> 33:3954-3963. https://doi.org/10.1016/j. </Link>

<Link>istruc.2021.06.110 </Link>
</P>

<P>Fishman, T. 2015. Academic integrity as an educational concept, concern and movement in US institutions of higher learning. In Bretag, T. (Ed.): Handbook of academic integrity, 7-22. Singapore: Springer. 
<Link>https://doi.org/10.1007/978-981-287-098-8_1 </Link>
</P>

<P>Friedman, M. 1975. There’s no such thing as a free lunch. Chicago: Open Court Publishing Company. </P>

<P>Frost, K. &amp; Sullivan, B. 2016. Transforming healthcare through artificial intelligence systems. Artificial Intelligence in Health and Life Sciences. DocPlayer
<Link>. 19 slides. Available at: https://docplayer. </Link>

<Link>net/36848717-Transforming-healthcare-through-artificial</Link>

<Link>intelligence-systems.html</Link>
. (Accessed on 30 December 2023). </P>

<P>Fuchs, T. 2021. Beyond the human? A critique of transhumanism. In Fuchs, T. (Ed.): In defence of the human being: Foundational questions of an embodied anthropology, 49-82. Oxford: Oxford University Press. 
<Link>https://doi.org/10.1093/oso/9780192898197.003.0003 </Link>
</P>

<P>Ganiev, AG. &amp; Tashev, SN. 2021. The role of ‘imagination’ in the process of ‘creative thinking’ developing students’ ‘imagination’ and ‘creative thinking’ skills in teaching physics. Psychology and Education
<Link> 58(1):3569-3575. https://doi.org/10.17762/pae. </Link>

<Link>v58i1.1309 </Link>
</P>

<P>Gartner, 2023. What’s new in artificial intelligence from the 2023 Gartner 
<Link>hype cycle. 17 August 2023. Available at: https://www.gartner. </Link>

<Link>com/en/articles/what-s-new-in-artificial-intelligence-from</Link>

<Link>the-2023-gartner-hype-cycle</Link>
. (Accessed on 5 December 2023). </P>

<P>Gmyrek, P., Berg, J., &amp; Bescond, D. 2023. Generative AI and jobs: A global analysis of potential effects on job quantity and quality. ILO Working 
<Link>Paper 96. Geneva: ILO. https://doi.org/10.54394/FHEM8239 </Link>
</P>

<P>Grand View Research. 2023. Digital transformation market to reach 
<Link>$4,617.78 billion by 2030. 8 August 2023. Available at: https:// </Link>

<Link>www.grandviewresearch.com/press-release/global-digital</Link>

<Link>transformation-market#</Link>
. (Accessed on 7 December 2023). </P>

<P>Gregory, RJ. 2004. Psychological testing: History, principles, and applications. Boston: Allyn &amp; Bacon. </P>

<P>Hellman, M. 2019. Augmented writing technology: A writer’s friend or foe? The Seattle Times
<Link>. 30 April 2019. Available at: https:// </Link>

<Link>www.seattletimes.com/business/technology/augmented</Link>

<Link>writing-technology-a-writers-friend-or-foe/</Link>
. (Accessed on 30 December 2023). </P>

<P>Hua, JH. 2023. Beyond exams: Investigating AI tool impact on student attitudes, ethical awareness, and academic dishonesty in online college assessments. International Journal of Educational Management and Development Studies
<Link> 4(4):160-185. https://doi. </Link>

<Link>org/10.53378/353030 </Link>
</P>

<P>Huxley, J. 1957. Transhumanism. London: Chatto &amp; Windus. </P>

<P>ICAI (International Centre for Academic Integrity). 2021. The fundamental values of academic integrity. 3rd ed. Albany: ICAI. </P>

<P>Jergus, K. 2018. Jean-Jacques Rousseau (1712-1778). In Smeyers, P (Ed.): International handbook of philosophy of education, 395-406. Part 1. 
<Link>Cham: Springer. https://doi.org/10.1007/978-3-319-72761-5_33 </Link>
</P>

<P>Jones, M. &amp; Sheridan, L. 2015. Back translation: An emerging sophisticated cyber strategy to subvert advances in ‘digital age’ plagiarism detection and prevention. Assessment &amp; Evaluation in Higher Education
<Link> 40(5):712-724. https://doi.org/10.1080/02602938.201 </Link>

<Link>4.950553 </Link>
</P>

<P>Kaartemo, V. &amp; Helkkula, A. 2018. A systematic review of artificial intelligence and robots in value co-creation: Current status and future research avenues. Journal of Creating Value 4(2):211-228. 
<Link>https://doi.org/10.1177/2394964318805625 </Link>
</P>

<P>Kamiya, A. 2023. Transhumanism: History, criticisms &amp; future of humanity. 
<Link>Study.com</Link>

<Link>. 8 March 2023. Available at: https://study. </Link>

<Link>com/academy/lesson/transhumanism-overview-history</Link>

<Link>debates.html</Link>
. (Accessed on 2 January 2024). </P>

<P>Kelly, K. 2016. The inevitable: Understanding the 12 technological forces that will shape our future. New York: Penguin. </P>

<P>Khan, MA. &amp; Vivek, MA. 2022. Artificial intelligence and big data: The advent of new pedagogy in the adaptive e-learning system in the higher educational institutions of Saudi Arabia. Education Research International
<Link> 2022. 10 pages. https://doi. </Link>

<Link>org/10.1155/2022/1263555 </Link>
</P>

<P>Kirk, A. 1996. Learning and the marketplace: A philosophical, cross-cultural (and occasionally irreverent) guide for business and academe. Carbondale: Southern Illinois University Press. </P>

<P>Korteling, JH., Van de Boer-Visschedijk, GC., Blankendaal, RA., Boonekamp, RC., &amp; Eikelboom, AR. 2021. Human- versus artificial intelligence. Frontiers in Artificial Intelligence 4. 622364. 
<Link>13 pages. https://doi.org/10.3389/frai.2021.622364 </Link>
</P>

<P>Kurosu, M. (Ed.): Human-computer interaction: Theory, methods and tools. HCII 2021. Lecture Notes in Computer Science 12762. Cham: 
<Link>Springer. https://doi.org/10.1007/978-3-030-78462-1 </Link>
</P>

<P>Lamchek, JS. 2023. Ensuring data science and its applications benefit humanity: Data monetization and the right to science. Human Rights Law Review
<Link> 23(3):1-23. https://doi.org/10.1093/hrlr/ </Link>

<Link>ngad018 </Link>
Lancaster, T. 2020. Commercial contract cheating provision through micro-outsourcing web sites. International Journal for Educational Integrity
<Link> 16(4). 14 pages. https://doi.org/10.1007/s40979-020</Link>

<Link>00053-7 </Link>
</P>

<P>Lang, JM. 2013. Cheating lessons: Learning from academic dishonesty. Cambridge: Harvard University Press. </P>

<P>LeCun, Y., Boser, B., Denker, JS., Henderson, D., Howard, RE., Hubbard, W., &amp; Jackel, LD. 1989. Handwritten digit recognition with a back-propagation network. In Touretzky, D. (Ed.): Advances in neural information processing systems, 396-404. Part 5. NIPS Conference. Denver: Morgan Kaufmann. </P>

<P>Leslie, D. 2019. Understanding artificial intelligence ethics and safety: A guide for the responsible design and implementation of AI systems in the public sector
<Link>. London: The Alan Turing Institute. https://doi. </Link>

<Link>org/10.2139/ssrn.3403301 </Link>
</P>

<P>Lu, M. 2023. How smart is ChatGPT? Visual Capitalist. 26 April 2023. Available at: https://www.visualcapitalist.com/how-smartis-chatgpt/#:~:text=ChatGPT%2C%20a%20language%20 model%20developed,help%20them%20with%20their%20 homework. (Accessed on 12 December 2023). </P>

<P>Lundgren, M. 2024. Large language models in student assessment: Comparing ChatGPT and human graders. arXiv preprint. 
<Link>https:// </Link>

<Link>doi.org/10.2139/ssrn.4874359 </Link>
</P>

<P>Mahabeer, P. &amp; Pirtheepal, T. 2019. Assessment, plagiarism and its effect on academic integrity: Experiences of academics at a university in South Africa. South African Journal of Science 115(11-12):1-8. 
<Link>https://doi.org/10.17159/sajs.2019/6323 </Link>
</P>

<P>Manne, R. &amp; Kantheti, SC. 2021. Application of artificial intelligence in healthcare: Chances and challenges. Current Journal of Applied Science and Technology
<Link> 40(6):78-89. https://doi.org/10.9734/ </Link>

<Link>cjast/2021/v40i631320 </Link>
</P>

<P>Marais, IE. 2022. Institutionalisation of academic integrity: Experiences at a distance education university in South Africa during COVID-19. Critical Studies in Teaching and Learning
<Link> 10(2):57-79. https://doi. </Link>

<Link>org/10.14426/cristal.v10i2.585 </Link>
</P>

<P>Mboti, N. 2023. Artificial general intelligence, sentience, and replaceability of humans: Hype or not? A heated theoretical exchange between two University of the Free State professors. 1st Foundational Digital Capabilities Research Seminar, Tshwane, 
<Link>South Africa. Available at: https://www.researchgate.net/ </Link>

<Link>publication/271390398_Artificial_General_Intelligence_ </Link>

<Link>Concept_State_of_the_Art_and_Future_Prospects</Link>
. (Accessed on 31 December 2023). </P>

<P>McCabe, DL. 2005. It takes a village: Academic dishonesty and educational opportunity. Liberal Education 91(3):26-31. </P>

<P>McCallum, S. 2023. ChatGPT banned in Italy over privacy concerns. BBC News
<Link>. 1 April 2023. Available at: https://www.bbc.com/news/ </Link>

<Link>technology-65139406</Link>
. (Accessed on 26 December 2023). </P>

<P>Medway, D., Roper, S., &amp; Gillooly, L. 2018. Contract cheating in UK higher education: A covert investigation of essay mills. British Educational Research Journal
<Link> 44:393-418. https://doi.org/10.1002/ </Link>

<Link>berj.3335 </Link>
</P>

<P>Meskó, B. 2023. Prompt engineering as an important emerging skill for medical professionals: Tutorial. Journal of Medical Internet Research
<Link> 25. p.e50638. 6 pages. https://doi.org/10.2196/50638 </Link>
</P>

<P>Motala, S. 2018. Achieving ‘free education’ for the poor: A realisable goal in 2018? Introduction Part III. Journal of Education 68:15-29. </P>

<P>Moravec, H. 1988. Mind children: The future of robot and human intelligence. Cambridge: Harvard University Press. </P>

<P>Mu, C. 2023. Based on natural language processing, human-computer dialogue, image recognition, and machine learning analysis whether artificial intelligence will surpass the human brain. Applied and Computational Engineering
<Link> 5(1):40-47. https://doi. </Link>

<Link>org/10.54254/2755-2721/5/20230526 </Link>
</P>

<P>Mwamwenda, TS. 2006. Academic integrity: South African and American university students. The Journal of Independent Teaching and Learning 1(1):34-44. </P>

<P>Nasrullah, N. &amp; Al Wahyu, T. 2024. The application of Chat GPT in English Language evaluation: A systematic literature review. Futurity Education 
<Link>4(3):217-235. https://doi.org/10.57125/ </Link>

<Link>FED.2024.09.25.13 </Link>
</P>

<P>Neha, K. 2020. Role of artificial intelligence in education. Alochana Chakra Journal 9:305-309. </P>

<P>OECD (Organisation for Economic Co-operation and Development). 2023. The impact of AI on the workplace: Main findings from the OECD AI surveys of employers and workers. Available at: https://www. oecd.org/employment-outlook/2023/#ai-jobs. (Accessed on 12 December 2023). </P>

<P>Oliver, E. 2022a. Society 5.0 and Education 5.0 with reference to higher education. In Oliver, E. (Ed.): Global initiatives and higher education in the fourth industrial revolution, 53-68. Auckland Park: 
<Link>UJ Press. https://doi.org/10.36615/9781776405619-02 </Link>
</P>

<P>Oliver, WH. 2022b. Global initiatives within the 4IR and the role of higher education. In Oliver, E. (Ed.): Global initiatives and higher education in the fourth industrial revolution, 27-52. Auckland Park: 
<Link>UJ Press. https://doi.org/10.36615/9781776405619 </Link>
</P>

<P>Pecorari, D. 2008. Academic writing and plagiarism: A linguistic analysis. London: Bloomsbury. </P>

<P>Perkins, M. 2023. Academic integrity considerations of AI large language models in the post-pandemic era: ChatGPT and beyond. Journal of University Teaching &amp; Learning Practice
<Link> 20(2). 24 pages. https:// </Link>

<Link>doi.org/10.53761/1.20.02.07 </Link>
</P>

<P>Perkins, M., Gezgin, UB., &amp; Gordon, RD. 2019. Plagiarism in higher education: Classification, causes and controls. Pan-Pacific Management Science 2:1-12. </P>

<P>Planck. 2018. From an almost perfect universe to the best of both worlds. 
<Link>17 July 2018. Available at: https://sci.esa.int/web/planck/</Link>

<Link>/60499-from-an-almost-perfect-universe-to-the-best-of</Link>

<Link>both-worlds</Link>
. (Accessed on 2 January 2024). </P>

<P>Pokrivcakova, S. 2019. Preparing teachers for the application of AI-powered technologies in foreign language education. Journal of Language and Cultural Education
<Link> 7(3):135-153. https://doi. </Link>

<Link>org/10.2478/jolace-2019-0025 </Link>
</P>

<P>Poola, I. 2023. Overcoming ChatGPT’s inaccuracies with pre-trained AI prompt engineering sequencing process. International Journal of Technology and Emerging Sciences 3(3):16-19. </P>

<P>Popenici, SAD. &amp; Kerr, S. 2017. Exploring the impact of artificial intelligence on teaching and learning in higher education. Research and Practice in Technology Enhanced Learning 12. 22. 13 
<Link>pages. https://doi.org/10.1186/s41039-017-0062-8 </Link>
</P>

<P>PWC (Price Waterhouse Coopers). 2017. No longer science fiction, AI and 
<Link>robotics are transforming healthcare. Available at: https://pwc. </Link>

<Link>to/2weGo5v</Link>
. (Accessed on 12 December 2023). </P>

<P>Quinlan, JR. 1986. Induction of decision trees. Machine Learning 1:81-106. 
<Link>https://doi.org/10.1007/BF00116251 </Link>
</P>

<P>Ragolane, M., Patel, S., &amp; Salikram, P. 2024. AI versus human graders: Assessing the role of large language models in higher education. Asian Journal of Education and Social Studies 50(10):244-263. 
<Link>https://doi.org/10.9734/ajess/2024/v50i101616 </Link>
</P>

<P>Robayo-Pinzon, O., Rojas-Berrio, S., Rincon-Novoa, J., &amp; Ramirez-Barrera, A. 2023. Artificial intelligence and the value co-creation process in higher education institutions. International Journal of Human-Computer Interaction
<Link>. 17 pages. https://doi.org/10.1080/10 </Link>

<Link>447318.2023.2259722 </Link>
</P>

<P>Roberts, J. 2016. Thinking machines: The search for artificial intelligence. Distillations Magazine 2(2):14-23. </P>

<P>Roe, J. &amp; Perkins, M. 2022. What are automated paraphrasing tools and how do we address them? A review of a growing threat to academic integrity. International Journal for Educational Integrity 
<Link>18. 15. 10 pages. https://doi.org/10.1007/s40979-022-00109-w </Link>
</P>

<P>Roose, K. 2023. A.I. poses ‘risk of extinction,’ industry leaders warn. The New York Times
<Link>. 30 May 2023. Available at: https://www.nytimes. </Link>

<Link>com/2023/05/30/technology/ai-threat-warning.html</Link>
. (Accessed on 7 January 2024). </P>

<P>Rothman, J. 2023. Why the godfather of A.I. fears what he’s built. The New Yorker
<Link>. 13 November 2023. Available at: https://www.newyorker. </Link>

<Link>com/magazine/2023/11/20/geoffrey-hinton-profile-ai</Link>
. (Accessed on 5 December 2023). </P>

<P>Russ, H., Soffel, MH., Kasai, MA., &amp; Börner, G. 1997. Age of the universe: Influence of the inhomogeneities on the global expansion factor. Physical Review 
<Link>D56(4):2044-2050. https://doi.org/10.1103/ </Link>

<Link>PhysRevD.56.2044 </Link>
</P>

<P>Russell, SJ. &amp; Norvig, P. 2021. Artificial intelligence a modern approach. 4th ed. London: Pearson. </P>

<P>Rutgers Business School. 2014. New academic integrity award will honor research of retiring Professor Donald L. McCabe. Rutgers Newark. 
<Link>20 April 2014. Available at: https://www.newark.rutgers.edu/news/new-academic-integrity-award-will-honor-research-retiring-professor-donald-l-mccabe</Link>
. (Accessed on 7 January 2024). </P>

<P>Sadler, DR. 1989. Formative assessment and the design of instructional systems. Instructional Science
<Link> 18:119-144. https://doi.org/10.1007/BF00117714 </Link>
</P>

<P>Samoili, S., Cobo, ML., Gómez, E., De Prato, G., Martínez-Plumed, F., &amp; Delipetrev, B., 2020. AI watch. Defining artificial intelligence: Towards an operational definition and taxonomy of artificial intelligence. JRC Technical Report. Luxembourg: Publications Office of the European Union. </P>

<P>Sharma, RC., Kawachi, P., &amp; Bozkurt, A. 2019. The landscape of artificial intelligence in open, online and distance education: Promises and concerns. Asian Journal of Distance Education 14(20):1-2. </P>

<P>Shortliffe, T. &amp; Davis, R. 1975. Some considerations for the implementation of knowledge-based expert systems. ACM SIGART Bulletin (55):9
<Link>12. https://doi.org/10.1145/1045253.1045254 </Link>
</P>

<P>Stevens, TN. 2013. Promoting a culture of integrity: A study of faculty and student perceptions of academic dishonesty at a large public Midwestern university. PhD thesis. University of Missouri-St. 
<Link>Louis. Available at: https://core.ac.uk/download/pdf/217322415.pdf</Link>
. (Accessed on 5 January 2024). </P>

<P>Stone, A. 2023. Student perceptions of academic integrity: A qualitative study of understanding, consequences, and impact. Journal of Academic Ethics 
<Link>21:357-375. https://doi.org/10.1007/s10805-022-09461-5 </Link>
</P>

<P>Sutherland, SW. 2008. Plagiarism, the internet, and student learning: Improving academic integrity. New York: Routledge. </P>

<P>Takyar, A. 2022. AI use cases &amp; applications across major industries. LeewayHertz-Software Development Company Publications. 
<Link>Available at: https://www.leewayhertz.com/ai-use-cases-and-applications/</Link>
. (Accessed on 12 December 2023). </P>

<P>Thiebes, S., Lins, S., &amp; Sunyaev, A. 2021. Trustworthy artificial intelligence. Electronic Markets
<Link> 31:447-464. https://doi.org/10.1007/s12525-020-00441-4 </Link>
</P>

<P>Thomas, A. &amp; De Bruin, GP. 2015. Plagiarism in South African management journals. South African Journal of Science
<Link> 111(1/2). 3 pages. https://doi.org/10.17159/sajs.2015/20140017 </Link>
</P>

<P>Timenko, M. 2021. 21st century skills in school education in the United Kingdom. Тhe Sources of Pedagogical Skills
<Link> 26:202-206. https://doi.org/10.33989/2075-146x.2020.26.227654 </Link>
</P>

<P>Trilling, B. &amp; Fadel, C. 2009. 21st century skills: Learning for life in our times. San Francisco: Jossey-Bass. </P>

<P>Trott, S., Jones, C., Chang, T., Michaelov, J., &amp; Bergen, B. 2023. Do large language models know what humans know? Cognitive Science 
<Link>47(7). p.e13309. 21 pages. https://doi.org/10.1111/cogs.13309 </Link>
</P>

<P>Turing, AM. 1950. Computing machinery and intelligence. Journal of Mind 
<Link>59(236):433-460. https://doi.org/10.1093/mind/LIX.236.433 </Link>
</P>

<P>Vandana, KL. &amp; Nagaveni, NB. 2019. University grants commission (promotion of academic integrity and prevention of plagiarism in higher educational institutions) regulations, 2018. CODS-Journal of Dentistry 9(2):iv-vi. </P>

<P>Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, AN., Kaiser, Ł., &amp; Polosukhin, I. 2017. Attention is all you need. In Guyon, I., Von Luxburg, U., Bengio, S., Wallach, H., Fergus, R., Vishwanathan, S., &amp; Garnett, R. (Eds.): Advances in neural information processing systems 30. NeurIPS Proceedings. Available at:
<Link>https://proceedings.neurips.cc/paper_files/paper/2017. </Link>
(Accessed on 11 January 2024). </P>

<P>Venugopal, R. &amp; Mamatha, V. 2023. Impact of artificial intelligence (AI) on teaching and learning in India’s higher education sector. IOSR Journal of Research &amp; Method in Education 13(5). 6 pages. </P>

<P>Wahle, JP., Ruas, T., Kirstein, F., &amp; Gipp, B. 2022. How large language models are transforming machine-paraphrased plagiarism. In Goldberg, Y., Kozareva, Z., &amp; Zhang, Y. (Eds.): Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing (EMNLP) in Abu Dhabi, UAE, 952-963. Available at: 
<Link>https://aclanthology.org/2022.emnlp-main.62/</Link>
. (Accessed on 25 
<Link>January 2024). https://doi.org/10.18653/v1/2022.emnlp-main.62 </Link>
</P>

<P>Walker J. 2010. Measuring plagiarism: Researching what students do, not what they say they do. Studies of Higher Education 35(1):41-59. 
<Link>https://doi.org/10.1080/03075070902912994 </Link>
</P>

<P>WEF (World Economic Forum). 2016. Future of work: Ten 21st-century 
<Link>skills every student needs. 10 May 2016. Available at: https://www.weforum.org/agenda/2016/03/21st-century-skills-future-jobs-students/</Link>
. (Accessed on 9 January 2024). </P>

<P>Whitby, B. 2008. Artificial intelligence: A beginner’s guide. Oxford: One World. </P>

<P>William, S. 1993. On language: Words left out in the cold. The New York Times. 14 February 1993. Available 
<Link>at: https://query.nytimes.com/gst/fullpage.html?res=9F0CE7DF1138F937A25751C0A965958260</Link>
. (Accessed on 25 January 2024). </P>

<P>Winograd, T. 1971. Procedures as a representation for data in a computer program for understanding natural language. PhD thesis, Massachusetts Institute of Technology, Cambridge. Available at: 
<Link>https://apps.dtic.mil/sti/pdfs/AD0721399.pdf</Link>
. (Accessed on 6 January 2024). </P>

<P>Winter, A. 2023. The Hollywood writers AI deal sure puts a lot of trust in studios to do the right thing. Wired. 28 September 2023. Available 
<Link>at: https://www.wired.com/story/writers-strike-hollywood-ai-protections/</Link>
. (Accessed on 25 December 2023). </P>

<P>Wu, Z., Ji, D., Yu, K., Zeng, X., Wu, D., &amp; Shidujaman, M. 2021. AI creativity and the human-AI co-creation model. In Kurosu, M. (Ed.): Human-computer interaction: Theory, methods and tools, 171-190. HCII 2021. Lecture Notes in Computer Science 12762. Cham: 
<Link>Springer. https://doi.org/10.1007/978-3-030-78462-1_13 </Link>
</P>

<P>Yadav, P., Tudela, LAM., &amp; Marco-Lajara, B. 2024. The role of AI in assessing and achieving the sustainable development goals (SDGs). In Irfan, M., Hussainey, K., Bukhari, SAC., &amp; Nam, Y. (Eds.): Issues of sustainability in AI and new-age thematic investing, 
<Link>1-17. Hershey: IGI Global. https://doi.org/10.4018/979-8-3693-3282-5.ch001 </Link>
</P>

<P>Yeo, MA. 2023. Academic integrity in the age of artificial intelligence (AI) authoring apps. TESOL Journal
<Link> 14(3). e716. 18 pages. https://doi.org/10.1002/tesj.716 </Link>
</P>

<P>Zaphiris, P. &amp; Ioannou, A. (Eds.). 2021. Learning and collaboration technologies: New challenges and learning experiences. 8th International Conference, LCT 2021, held as part of the 23rd HCI International Conference, HCII 2021, Virtual Event, 24-29 July 2021. Proceedings Part I (Vol. 12784). Cham: Springer Nature. </P>

<P>Zhang, Y. 2017. ‘Type 20: Corruption in education.’ The book of swindles: Selections from a late Ming collection. New York: Columbia 
<Link>University Press. https://doi.org/10.7312/zhan17862 </Link>
</P>

<P>Zhang, ZV. 2020. Engaging with automated writing evaluation (AWE) feedback on L2 writing: Student perceptions and revisions. Assessing Writing
<Link> 43. 100439. https://doi.org/10.1016/j.asw.2019.100439 </Link>
</P>
</Sect>
</Sect>
</Sect>

<Sect>
<H2 id="LinkTarget_10756">Chapter 3 </H2>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_24.jpg"/>
</Figure>
</Link>

<P>Critical Considerations for Establishing a </P>

<P>Link between Artificial Intelligence and </P>

<Sect>
<H3>Quality Assurance in Higher Education </H3>

<P>Faiza Gani </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_25.jpg"/>
</Figure>
</Link>

<Link>
<P>Department of Language Education, Arts and Culture University of South Africa Pretoria, South Africa </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_26.jpg"/>
</Figure>
</Link>

<P>The history of AI is a history of fantasies, possibilities, </P>

<P>demonstrations, and promise </P>

<P>(Buchanan 2005:53). </P>

<Sect>
<H4>Introduction </H4>

<P>AI (Artificial intelligence) has permeated the world and currently has an undeniable footprint in most areas of society. In relation to the HE (higher education) sector, AI is becoming a prominent feature. AI in HE is readily accessible through the use of ‘intelligent tutoring systems, teaching robots, learning analytics dashboards, adaptive learning systems and human-computer interfaces’ (Ishak &amp; Jiang 2022:70). Pelletier, Robert, Muscanell, McCormack, Reeves, Arbino, and Grajek (2023:4) aptly highlight the impact that AI currently has on the globe and the need to consider it as a current trend in the HE sector. Huang, Saleh, and Liu (2021:206) similarly argue that it is indisputable that AI has infiltrated the education space. As modern science and technology progress, the advancement in AI has also progressed. Research indicates that AI, when applied to education, has resulted in positive effects which have aided toward teaching reform (Huang et al. 2021:206). </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_27.jpg"/>
</Figure>
</Link>

<P>Two major areas which will be significantly impacted by AI in HE are curriculum offerings and enrolment areas (Dhawan &amp; Batra 2020:13). Joshi, Rambola, and Churi (2021:2 of 13) further highlight that AI in education has drawn attention to the following: </P>

<P>• Automation: AI is beneficial in automating a number of tasks. These include assessment, classifying digital assets </P>

<P>and schedules, and freeing up time for educators to interact with students as AI takes care of simple teaching tasks. </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Acclimation: In a digitally dominated world, AI will assist in equipping students with technological skills. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Integration: An application to teach students is possible </LBody>
</LI>
</L>

<P>through the integration of AI solutions and other IT </P>

<P>(information technology) initiatives such as intelligent technology. </P>

<P>• Delineation: AI can identify fundamental trends in education thereby directing stakeholders to make decisions about the curriculum which are topical and relevant in nature. </P>

<P>AI has gained traction in HE due to its ability to make a contribution by means of automating aspects of the teaching experience. Additionally, personalisation is a possibility through AI applications, as well as individualised feedback and identification of topics which require revision in class. This is made possible through the AI application detecting students’ success in learning which is informed by identifying students’ content knowledge and personal characteristics (Hinojo-Lucena, Aznar-Diaz, Caceres-Reche, &amp; Romero-Rodriguez 2019:2). Aldosari (2020:147) explains that AI can make a contribution to education for all stakeholders: This is possible through managing the integration of a variety of types of human interaction and information and communication technology. </P>

<P>Amidst the pervasive presence of AI in HE, issues of the impact that AI will have on QA (quality assurance) in HE come to the fore. QA refers to a mechanism to demonstrate that the practices in which IHEs (institutions of higher education) engage are both professional and credible (Aluko, Krull, &amp; Mhlanga 2022:7). Stanislav (2023:1056) cautions that AI could compromise the core of HE and its mandate to create and disseminate knowledge in society. Dhawan and Batra (2020:15) further signal the use of AI as a technology which is fragile in nature thereby exposing universities to a range of risks and challenges. In this regard these authors argue that IHEs need to be cognisant of these potential risks so that they maintain the correct control of power. </P>

<P>Zawacki-Richter, Marín, Bond, and Gouverneur (2019:4) similarly highlights a number of risks and implications from an ethical perspective as AI continues to become more evident in HE. From the human resource space, HE staff (lecturers, tutors, student counsellors, and staff designated to the administrative space) may feel threatened by AI applications such as chatbots, online intelligent tutors, and other expert systems arguing that these commodities will replace them. Further, AI has the potential to accelerate the capabilities of learning analytics. However, systems of this nature require significant amounts of data which may include confidential information about staff and students thereby raising concerns from a privacy and data protection point of view. Bearman, Ryan, and Ajjawi (2023:370) state that AI is not just a matter for technological innovation but also represents a fundamental change in the relation between HE and broader socioeconomic interests. At this time of accelerated change, where the social shifts are as significant as the technological ones, IHEs need to set strong policies and research agendas that attend to AI and take account of ethical implications (Bearman et al. 2023:370). </P>

<P>In 2020, Aldosari (2020:147) has highlighted expectancies about what life would be like in 2024 with specific reference to educational technology development. These refer to pedagogy, learning approaches and models, and approaches to make these easier. With reference to the expectancies Aldosari (2020:147) highlights that the realities in 2024 would match these expectancies with AI being a notable technological development. In this context, the question arises as to whether institutions are ready for this type of unprecedented change. As such it becomes incumbent for IHEs to interrogate the use of existing technology as well as become deliberate about the implementation of new technologies, which will create the opportunity for increased teaching and learning opportunities which are flexible in nature (Pelletier et al. 2023:10). This must be considered within the context of leveraging new technologies whilst assuring quality. </P>

<P>This chapter serves as vantage point to establish the use of AI in HE, consider the opportunities and challenges for using AI in HE, and most importantly establish a link between AI and QA. </P>
</Sect>

<Sect>
<H4>Approach Used for this Study </H4>

<P>An approach adopted by Grassini (2023) is followed in this study, starting with the main research question of this study which is: What is the link between AI and QA? This research question was explored primarily through published literature. Google Scholar was used as the primary search engine using the search strings ‘artificial intelligence,’ ‘higher education,’ and ‘challenges and opportunities.’ The literature was surveyed in a nonsystematic manner. </P>

<P>The years from 2019 to 2023 were included to represent a five year period, which is current and relevant in nature. The initial articles were surveyed and starting points with the snowball method were used to find additional articles. Additional articles were, for example, identified from the reference lists of the selected articles. This chapter uses the articles as informants for exploring the main research question. In the next section the definition of AI is explored to gain a better understanding of the term. </P>
</Sect>

<Sect>
<H4>What is Artificial Intelligence? </H4>

<P>‘AI’ is a term which dates back to the 1950s. It arose from the work of scientists who explored the idea of AI. Seminal developments include the creation of an AI programme referred to as ELIZA as noted by King (2023:2). This programme was made to copy human speech. As the time periods advanced so did AI developments parallel to the time periods. Later years saw the development of more advanced chatbots and other AI applications which had the potential to respond to and comprehend requests which were complex in nature (King 2023:2; Crompton &amp; Burke 2023:2). </P>

<P>Buchanan (2005:54) explains that AI goes beyond robots and should be understood from the perspective of grasping the nature of intellectual thinking and behaviour using computers as experimental devices. During its determining years, AI was influenced by a number of disciplines (Buchanan 2005:56) including engineering, biology, experimental psychology, philosophy, mathematics, statistics, and linguistics. Buchanan (2005:56) highlights that a seminal paper published in 1950 by Turing in the philosophy journal Mind, acts as a turning point in the history of AI. Turing’s paper sets a seminal discourse about the prospect of programming a computer to behave intelligently. Initial programmes in AI were limited by the technology of the time, nonetheless they demonstrated an impressive ability in answering problems that only people had been able to previously answer (Buchanan 2005:57). </P>

<P>There is a plethora of definitions for the term ‘AI.’ Kelly, Kaye, and Oviedo-Trespalacios (2023:2 of 33) highlight that providing a definition for AI at an academic, government, and community level poses a challenge. AI is a significantly contested concept and there has not been much consensus regarding its definition in the different fields in which it is used (Kelly et al. 2023:2 of 33). For the purposes of this chapter a few selected definitions are discussed below. </P>

<P>Kengam (2020) describes AI as a computer-based technology which has the ability to offer personalised, adaptive, and insightful educational experiences. The description of AI as put forward by Ocana-Fernandez, Valenzuel-Fernandez, &amp; Garro-Aburto (2019:557) assume AI to be related to the design of intelligent systems which display the characteristics associated with human intelligence. Jain &amp; Jain (2019:145) claim that AI is the impersonation of human comprehension and virtual decision making by robots and machines. </P>

<P>According to Chen, Chen, and Lin (2020:75265), AI is associated with a supercomputer which has in nature a massive processing potential. Functions which this supercomputer can execute, include adaptive behaviour whereby the inclusion of sensors and a range of capabilities provide the AI ability to mimic humanlike thinking. Hinojo-Lucena et al. (2019:2) opines that AI is a technology which is aimed at the construction of computer systems. These computer systems display smart and adaptive behaviours with the capability to acquire information from their environments mimicking what humans can do. The ability of computers to behave in a similar manner as the human brain for the improvement of the world is the description brought forward by Dhawan and Batra (2020:12). Aldosari (2020:145) outlines that AI is a distinct and current application of information systems which has a vested interest in unpacking and understanding human intelligence and replications to create the current generation of smart computers. These smart computers require a significant degree of ability to accomplish tasks which require inference, deduction, and perception. </P>

<P>While the latter cited authors offer different descriptions of AI the overriding commonalties which emanate from these definitions is that AI denotes the use of computers to simulate human thinking. Figure 3.1 helps to demonstrate this. </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_28.jpg"/>

<Caption>
<P>Figure 3.1: Artificial intelligence defined. (Source: Personal archive) </P>
</Caption>
</Figure>

<P>According to Kaul, Enslin, and Gross (2020:808), there are four different subfields of AI which can be distinguished: </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Machine learning relates to the identification of patterns and analysis. Machines have the capability to improve with experience from provided data sets. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Deep learning is made up of multi-layer neural networks which provide the ability for machines to learn and make independent decisions. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Natural language processing involves a process whereby computers are able to take data from human language and make a decision based on the data. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Computer vision consists of images and videos which are used for a computer to gain comprehension and information. </LBody>
</LI>
</L>

<P>While it is important to delineate the different subfields of AI, this chapter will not focus on a specific subfield but instead on AI in its general context. </P>
</Sect>

<Sect>
<H4>How AI is used in Higher Education </H4>

<P>To establish the manner in which AI is used in HE the work of Crompton and Burke (2023) is referred to. These authors have conducted a systematic review of AI in HE from 2016 to 2022. A summary of their findings highlight five areas for the use of AI in HE, which will be discussed in the sections below. </P>

<Sect>
<H5>Assessment/Evaluation </H5>

<P>Crompton and Burke (2023:14) highlight that AI is used in various ways for assessment in HE. These include automatic assessments like the use of assessment of subjective questions, grading of thesis statements, essay grading, and critique of free text. AI applications are also used to create assessments of varying types. Feedback on a group and personalised level is also facilitated through AI. The reviewing of online activities is also cited in terms of AI applications being used to review student interactions, reflections, and community identity among others. Finally, the evaluation of educational resources is also cited by Crompton and Burke (2023:14) in terms of AI applications having been used to evaluate textbooks and educational resources. </P>
</Sect>

<Sect>
<H5>Predicting </H5>

<P>Predicting in terms of HE data proved to be an affordance made possible through the use of AI. Crompton and Burke (2023:15) assert that HE data are utilised in the AI space through making predictions regarding academic performance, dropout, and at risk students, satisfaction, career decisions, and the future of HE among other types of data. </P>
</Sect>

<Sect>
<H5>AI Assistant </H5>

<P>Crompton and Burke (2023:16) highlight that AI assistants are used in HE to offer students assistance in different mediums e.g., chatbots, virtual assistants, and learning agents. These assistants offer an array of support which include offering students out of class support, scaffolding, answering of questions, and student outreach, to mention a few. </P>
</Sect>

<Sect>
<H5>Intelligent Tutoring Systems </H5>

<P>ITSs (intelligent tutoring systems) refer to systems that entail the use of educational methods and AI techniques. Within an ITS personalisation is possible as the system adapts activities and strategies based on the individual student needs (Crompton &amp; Burke 2023:16). In large classes where the instructor may experience a challenge in keeping up with the volume and pace of student queries, ITSs may be useful in responding to students and providing immediate feedback and instruction (Crompton &amp; Burke 2023:17). </P>
</Sect>

<Sect>
<H5>Managing Student Learning </H5>

<P>Crompton and Burke (2023:17) explain that university staff members access AI to manage student learning. In this regard AI could be utilised to analyse the effect of teaching, to cluster students, to sequence the curriculum, to profile personalities, and a range of other facets related to managing student learning. Figure 3.2 summarises the use of AI in HE as described by Crompton and Burke (2023). </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_29.jpg"/>

<Caption>
<P>Figure 3.2: Use of AI in HE. (Adapted from Crompton &amp; Burke 2023:14-17 of 22) </P>
</Caption>
</Figure>

<P>From the information above we can conclude that there is a definite potential for the use of AI in HE. The sections below look further into the opportunities and challenges for the use of AI in HE. </P>
</Sect>
</Sect>

<Sect>
<H4>Opportunities for the Use of AI in HE </H4>

<P>The literature (cf. Grassini 2023; Pelletier et al. 2023; Dhawan &amp; Batra 2023) presents an array of opportunities for the use of AI in HE. Grassini (2023:3 of 13) highlights that Gen-AI (generative artificial intelligence), for example ChatGPT (chat generative pre-trained transformer), offers the advantage of creating lesson plans and presentations which engage recipients and a range of educational resources. New and innovative teaching methods are also an added opportunity as educators will have more support available through AI thereby creating time and space for more engaging teaching resources and environments. Additionally, Kengam (2020), as well as Chatterjee and Bhattacharjee (2020:3444) present the aspect of customisation in the learning space. In this regard learning environments can be tailored to suit the individual learning needs of students. Regarding assessment, automated grading alleviates the workload of the educator thereby creating space for them to utilise the saved time on the development of students. </P>

<P>ITSs, which facilitate the process of using automated tutors to teach different disciplines and human NLP (natural language processing) systems are examples of AI in HE which have had positive impacts. Ocana-Fernandez et al. (2019:562) highlight that these systems create an opportunity for diverse classrooms with the potential to individualise the learning experience. Pelletier et al. (2023:10) further discuss the impact of AI on HE. In this regard, AI can assist the educator with the creation of teaching content and automated grading. This creates the opportunity for educators to have additional time to focus on higher order pedagogical tasks thereby creating new knowledge for their students. The use of avatars and the metaverse for example, also creates engaging opportunities and personalisation for students. With the help of AI universities around the world are enrolling an increased number of students due to an increased flexibility and speed (Jain &amp; Jain 2019:146). </P>

<P>Customisation of the learning experience as discussed by Dhawan and Batra (2020:17) is possible through AI, where a ‘one-size-fits-all’ approach can be altered to suit the needs of each individual student. Owing to the range of factors that influence learning, some of which include the knowledge that the student brings to the learning encounter, the rate at which students learn and absorb knowledge, and cultural views, AI can serve as a mechanism to accommodate different learning styles. Dhawan and Batra (2020:17) provide the example of emotion AI, which makes it possible to study behaviour which is non-verbal in nature. Emotion AI has the ability to respond to cues such as fear, happiness, and anger and deliver lessons and establish how engaged students are in the lesson. AI can further equip students with ‘tech-savvy’ skills necessary for the working environment. </P>

<P>Virtual assistants can be of great benefit in terms of responding to student queries. Additionally, virtual assistants can help with the enrolment, monitoring drop-out rates and retention, and prompting university staff to intervene accordingly. From a research perspective AI can be useful in managing structured and unstructured data, processing data, and suggesting new models with the potential to revise research practices in disciplines which are traditionally defined. AI can additionally assist administrators in education regarding decision-making in terms of the development of courses, pedagogical design, and transformation in the academic space (Ouyang, Zheng, &amp; Jiao 2022:7895). </P>

<P>Kuleto, Ilić, Dumangiu, Ranković, Martins, Păun, &amp; Mihoreanu (2021:7 of 16) highlight that AI can offer IHEs a number of benefits. Key processes such as student enrolment, curriculum planning, the creation and offering of personalised lessons, and mitigating high dropout numbers can be assisted via AI. As these processes can be challenging to manage, IHEs may consider using AI to manage them. Specifically learning analytics can unearth student data for the management of these processes. Chatbots and virtual tutor assistants can also assist. Repetitive tasks which are time consuming in nature can also be supported by AI, thereby offering an improved learning experience (Kuleto et al. 2021:7 of 16). AI is further beneficial in the curriculum space enabling personalised learning experiences through the use of virtual reality and robotics, among others. </P>

<P>AI has the ability to provide instruction, guidance, and feedback which are prompt in nature. In this manner time delays are avoided and unlike humans who may become tired due to large volumes, this will not present a challenge to AI (Crompton &amp; Burke 2023:17). Table 3.1 below summarises the opportunities for the use of AI in HE. </P>

<Table>
<Caption>
<P>Table 3.1: Summary of opportunities for the use of AI in HE </P>
</Caption>

<TR>
<TD>Author </TD>

<TD>Key Opportunity </TD>
</TR>

<TR>
<TD>Grassini (2023) </TD>

<TD>Lesson plans. Presentations. Educational resources. Innovative teaching methods. </TD>
</TR>

<TR>
<TD>Kengam (2020); Chatterjee and Batacharjee (2020) </TD>

<TD>Customisation. Individual learning needs. Automated grading. Additional instructional time. </TD>
</TR>

<TR>
<TD>Ocana-Fernandez et al. (2019) </TD>

<TD>Individual learning experience. </TD>
</TR>
</Table>

<Table>
<TR>
<TD>Author </TD>

<TD>Key Opportunity </TD>
</TR>

<TR>
<TD>Pelletier et al. (2023) </TD>

<TD>Creation of teaching content. Automated grading. Added instructional time equals additional time for higher order thinking. Personalised learning. </TD>
</TR>

<TR>
<TD>Jain and Jain (2019) </TD>

<TD>Increased student enrolment. </TD>
</TR>

<TR>
<TD>Dhawan &amp; Batra (2020) </TD>

<TD>Customisation of learning experience. Accommodation of different learning styles. Equip student with ‘tech-savvy’ skills. </TD>
</TR>

<TR>
<TD>Ouyang et al. (2022) </TD>

<TD>Virtual assistants respond to student queries, assist with student enrolment, monitor dropout rates, and retention. Managing research. Assist in decision making, development of courses, pedagogical design, and transformation. </TD>
</TR>

<TR>
<TD>Kuleto et al. (2021) </TD>

<TD>Student enrolment. Curriculum planning. Personalised lessons/learning. experience. Mitigating high dropout rate. Chatbots and virtual tutor assistants. </TD>
</TR>

<TR>
<TD>Crompton and Burke (2023) </TD>

<TD>Instructional guidance. Prompt feedback provided to students. Respond to queries on a mass scale. </TD>
</TR>
</Table>
</Sect>

<Sect>
<H4>Challenges for the Use of AI in Higher Education </H4>

<P>The progression in AI has raised ‘deliberations around academic integrity, accuracy, fairness and equity’ (Pelletier et al. 2023:10) in the HE space. Further concern has been highlighted in terms of whether AI supplies output which is accurate and unbiased in nature. Outputs for example could refer to automated grading which can be questioned in terms of its accuracy. Developments in Gen-AI (e.g., ChatGPT) are designed to generate answers for students. This automated generation of answers threatens the potential for students to meet learning aims of the module as they do not engage in the formulation of their own answers (Stanislav 2023:1057) and higher order thinking skills. </P>

<P>Concerns about ethics are also associated with the use of AI. In the HE space, the behaviour of all stakeholders is regulated and underpinned by ethics. This includes teaching, development of assessments, marking, and the manner in which students and staff interact and communicate with others. AI could pose a challenge to ethical behaviour in terms of thinking on behalf of the human being. Engaging in critical thinking and the ability for students to be innovative has a distinct link to economic and social effects as well as the ability of IHEs to compete with their counterparts (Stanislav 2023:1057). </P>

<P>Huang et al. (2021:212) highlight that in developing countries the use of new technologies can exacerbate a division between people. Basic infrastructure and technology must be in place for AI to provide equal opportunities for all to mitigate a situation where the digital divide is widened. Issues of safety and ethics also become a challenge in the AI space when data are collected, used, and shared. AI applications require of educators to master digital skills as the programmes which offer teaching and learning are built on applications which do require a range of digital literacy skills. The deterioration of students’ social and communication skills also poses a challenge in the AI environment. If students primarily communicate via machines, a lesser requirement of human communication is needed. As such, AI therefore poses a challenge in terms of communication skills (Huang et al. 2021:213). Hutson, Jeevanjee, Vander Graaf, Lively, Weber, Weir, Arnone, Carnes, Vosevich, Plate, and Leary (2022:3969) highlight alarming ethical concerns for the use of AI in education, with bias and privacy noted as the most prominent AI concerns. </P>

<P>Ouyang et al. (2022:7908-7909) narrate that AI faces a number of challenges in the education space. Questions regarding how to meet the learning needs of students, what to provide to them, and the mechanism to provide them with the correct degree of agency are some of the challenges facing AI. A range of systematic reviews indicate a common challenge in terms of the use of AI in education. This common challenge depicts a lack of a link between AI practices and implementation, and theoretical frames of reference (Ouyang et al. 2022:7908-7909). </P>

<P>A summary of the challenges for AI in HE is highlighted in Table 3.2. </P>

<Table>
<Caption>
<P>Table 3.2: Summary of challenges for the use of AI in HE </P>
</Caption>

<TR>
<TD>Author </TD>

<TD>Key Challenge </TD>
</TR>

<TR>
<TD>Pelletier et al. (2023) </TD>

<TD>Academic integrity. Accuracy. Fairness. Equity. </TD>
</TR>

<TR>
<TD>Stanislav (2023) </TD>

<TD>Accurate unbiased output. Generation of automated answers. A lack of higher order thinking skills. Concern regarding ethics. Thinking on behalf of humans. A lack of thinking among students could impact IHEs’ competitiveness. </TD>
</TR>

<TR>
<TD>Huang et al. (2021) </TD>

<TD>Widening of digital divide in developing countries. Requirement for educators to have digital skills. Deterioration of student communication skills. </TD>
</TR>

<TR>
<TD>Hutson et al. (2022) </TD>

<TD>Ethics: Privacy and bias. </TD>
</TR>

<TR>
<TD>Ouyang et al. (2022) </TD>

<TD>What to provide to students. How to provide resources to students. Providing students with the appropriate degree of agency. A break in the link between AI and theoretical frames of reference. </TD>
</TR>
</Table>
</Sect>

<Sect>
<H4>Establishing the Link between AI and QA </H4>

<P>To establish a possible link between AI and QA the work of Mohee (2019) is referred to. Mohee (2019) highlights that QA within the HE space needs to factor in the following: Programme design, learner support systems, materials development, student assessment, infrastructure, and facilities. The author of this chapter uses Mohee’s factors of consideration and relates them to the opportunities and challenges of AI (as highlighted in Tables </P>

<P>3.1 and 3.2). Both these tables are used as a point of reference to establish the possible link between AI and QA. </P>

<P>Table 3.3 presents the key QA factors as highlighted by Mohee (2019) and the possible link they have to the key opportunities as highlighted in Table 3.1. </P>

<Table>
<Caption>
<P>Table 3.3: Establishing the link between AI opportunities and quality assurance factors </P>
</Caption>

<TR>
<TH>Author </TH>

<TH>Key opportunity </TH>

<TH>Link to Mohee’s QA factors </TH>
</TR>

<TR>
<TD>Grassini (2023) </TD>

<TD>Lesson plans. Presentations. Educational resources. Innovative teaching methods. </TD>

<TD>Programme design. Learner support systems. Materials development. Student assessment. </TD>
</TR>

<TR>
<TD>Kengam (2020) and Chatterjee and Bhattacharjee (2020) </TD>

<TD>Customisation. Individual learning needs. Automated grading. Additional instructional time. </TD>

<TD>Programme design. Learner support systems. Student assessment. </TD>
</TR>

<TR>
<TD>Ocana-Fernandez et al. (2019) </TD>

<TD>Individual learning experience. </TD>

<TD>Learner support systems. </TD>
</TR>

<TR>
<TD>Pelletier et al. </TD>

<TD>Creation of teaching </TD>

<TD>Programme design. </TD>
</TR>

<TR>
<TD>(2023) </TD>

<TD>content. Automated grading. Added instructional time. Additional time for higher order thinking. Personalised learning. </TD>

<TD>Learner support systems. Materials development. </TD>
</TR>

<TR>
<TD>Jain and Jain (2019) </TD>

<TD>Increased student enrolment. </TD>

<TD>Infrastructure and facilities. </TD>
</TR>

<TR>
<TD>Dhawan and Batra (2020) </TD>

<TD>Customisation of learning experience. Accommodation of different learning styles. Equip student with ‘tech-savvy’ skills. </TD>

<TD>Student support systems. </TD>
</TR>

<TR>
<TH>Author </TH>

<TH>Key opportunity </TH>

<TH>Link to Mohee’s QA factors </TH>
</TR>

<TR>
<TD>Ouyang et al. </TD>

<TD>Virtual assistants respond </TD>

<TD>Infrastructure and </TD>
</TR>

<TR>
<TD>(2022) </TD>

<TD>to student queries, assist with student enrolment, monitor drop-out rates and retention. Managing research. Assist in decision making. Development of courses, pedagogical design, and transformation. </TD>

<TD>facilities. Programme design. Student support systems. Materials development. </TD>
</TR>

<TR>
<TD>Kuleto et al. (2021) </TD>

<TD>Student enrolment. Curriculum planning. Personalised lessons/ learning experience. Mitigating high dropout. Chatbots and virtual tutor assistants. </TD>

<TD>Infrastructure and facilities. Programme design. Materials developments. Student support systems. </TD>
</TR>

<TR>
<TD>Crompton and </TD>

<TD>Instructional guidance. </TD>

<TD>Student support </TD>
</TR>

<TR>
<TD>Burke (2023) </TD>

<TD>Prompt feedback provided to students. Respond to queries on a mass scale. </TD>

<TD>systems. Student assessment. Infrastructure and facilities. </TD>
</TR>

<Caption>
<P>Table 3.4 presents the key QA factors as highlighted by Mohee (2019) and the possible link they have to the key challenges as highlighted in Table 3.2. </P>
</Caption>
</Table>

<P>Table 3.4: Establishing the link between AI challenges and quality assurance factors </P>

<Table>
<TR>
<TH>Author </TH>

<TH>Key challenge </TH>

<TH>Link to Mohee’s QA factors </TH>
</TR>

<TR>
<TD>Pelletier et al. </TD>

<TD>Academic integrity. </TD>

<TD>Student assessment. </TD>
</TR>

<TR>
<TD>(2023) </TD>

<TD>Accuracy. </TD>

<TD/>
</TR>

<TR>
<TD/>

<TD>Fairness. </TD>

<TD/>
</TR>

<TR>
<TD/>

<TD>Equity. </TD>

<TD/>
</TR>

<TR>
<TH>Author </TH>

<TH>Key challenge </TH>

<TH>Link to Mohee’s QA factors </TH>
</TR>

<TR>
<TD>Stanislav (2023) </TD>

<TD>Accurate unbiased output (automated grading). Generation of automated answers. A lack of higher order thinking skills. Concern regarding ethics. Thinking on behalf of humans. A lack of thinking from students could impact IHEs’ competitiveness. </TD>

<TD>Student assessment. Programme design. Student support systems. Materials development. Infrastructure and facilities. </TD>
</TR>

<TR>
<TD>Huang et al. (2021) </TD>

<TD>Widening of digital divide in developing countries. Requirement for educators to have digital skills. Deterioration of student communication skills. </TD>

<TD>Infrastructure and facilities. Programme design. </TD>
</TR>

<TR>
<TD>Hutson et al. (2022) </TD>

<TD>Ethics: Privacy and bias. </TD>

<TD>Infrastructure and facilities. </TD>
</TR>

<TR>
<TD>Ouyang et al. </TD>

<TD>What to provide to </TD>

<TD>Programme design. </TD>
</TR>

<TR>
<TD>(2022) </TD>

<TD>students. How to provide resources to students. Providing students with the appropriate degree of agency. A lack in the link between AI and theoretical frames of reference. </TD>

<TD>Infrastructure and facilities. </TD>
</TR>
</Table>

<P>An evaluation of Tables 3.3 and 3.4 highlights that each of Mohee’s factors (i.e., programme design, student support systems, materials development, student assessment, infrastructure, and facilities) features across the cited AI opportunities and challenges. This implies that the opportunities presented by AI can have a positive impact on QA. </P>

<P>Lesson plans, presentations, educational resources, and innovative teaching methods as cited by Grassini (2023) link </P>

<P>to programme design, student support systems, materials development, and student assessment. In this regard AI opportunities could have a positive impact on QA in terms of programme design, student support systems, materials development, and student assessment. Kengam (2020), as well as Chatterjee and Batacharjee (2020:3444) highlight customisation, individual learning needs, automated grading, and additional instructional time as AI opportunities. These in turn link to programme design, student support systems, and student assessment QA factors implying a positive impact in this regard. Individual learning experiences (Ocana-Fernandez et al. 2019:555) have a direct link to student support systems, while the creation of teaching content, automated grading, added instructional time, and personalised learning (Pelletier et al. 2023:10) can contribute positively to the QA factors of programme design, student support systems and materials development. Increased student enrolment (Jain &amp; Jain 2019:146) is a positive QA factor made possible through infrastructure and facilities. </P>

<P>The customisation of the learning experience, accommodation of different learning styles, and equipping students with ‘tech-savvy’ skills link to the QA factor of student support systems, indicating a positive connection in this regard. Virtual assistants by way of responding to student queries, assisting with student enrolment, monitoring dropout rates and retention, managing research, and assisting in decision making through the development of courses as well as pedagogical design and transformation (Ouyang et al 2022:7895), have a direct link and positive contribution to the QA factors of infrastructure and facilities, programme design, learner support systems, and materials development. </P>

<P>Student enrolment, curriculum planning, personalised lessons/learning experience, mitigating a high dropout rate and the use of chatbots and virtual tutor assistants (Kuleto et al. 2021:7 of 16) on the other hand link to and have a positive impact on the QA factors of infrastructure and facilities, programme design, student support systems, and materials development. Finally, instructional guidance, feedback promptly provided to students, and responding to queries on a mass scale (Crompton &amp; Burke 2023:17) positively influence the QA factors of student support systems, student assessment, and infrastructure and facilities. </P>

<P>Evaluating the link between Mohee’s QA factors and the key AI challenges it appears that academic integrity, accuracy, fairness, and equity (Pelletier et al. 2023:10) could negatively influence student assessment in the HE space. Further accurate unbiased output (automated grading), the generation of automated answers, a lack of higher order thinking skills, concerns regarding ethics, thinking on behalf of humans, and a lack of thinking from students (Stanislav 2023:1057) could impact IHEs’ competitiveness and have a negative effect on the QA factors of student assessment, programme design, student support systems, materials development, and infrastructure and facilities. A widening of the digital divide in developing countries, the requirement for educators to have digital skills, and a deterioration of student communication skills (Huang et al. 2021:212) are linked to the QA factors of infrastructure, facilities, and programme design. This implies that if there is not a proper infrastructure and facilities in place AI could widen the digital divide in developing countries, additionally a deterioration of student communication skills could negatively impact programme design. </P>

<P>Hutson et al. (2022) raise the challenge of ethics with specific reference to privacy and bias. If IHEs do not have competent infrastructures and facilities in place, ethics could be compromised. </P>

<P>Lastly, an AI application needs to make decisions in terms of what to provide to students, how to provide resources to them, and providing them with the appropriate degree of agency. If these decisions are not appropriately taken QA factors of programme design could be negatively impacted. </P>

<P>The discussions above provide an evaluation of the link between AI and QA from both a positive and negative lens. In terms of impact the positive links between AI opportunities and QA as highlighted above could then positively influence the QA factors mentioned. On the other hand, the negative links between AI challenges and QA factors, as highlighted could impact negatively on QA factors as demonstrated in the discussion above. </P>

<P>What this implies is that IHEs need to carefully look at the challenges and opportunities that AI offers. Where AI opportunities can improve QA factors these must be carefully implemented. Conversely, the challenges that AI presents should be carefully studied and strategies should be developed to prevent these challenges from impacting negatively on QA factors. </P>
</Sect>

<Sect>
<H4>Conclusion </H4>

<P>This chapter aimed at establishing a link between AI and QA. To achieve this an introduction to AI was first offered. This was followed by attempting to define AI albeit the literature highlighted that there are various definitions for AI. Ultimately this chapter offered a definition for AI which refers to the simulation of human thinking through the use of computers. Next the use of AI in HE was discussed. This highlighted assessment (evaluation), AI assistance, intelligent tutoring systems, and managing student learning as key uses for AI in HE. </P>

<P>Following this the opportunities and challenges for the use of AI were presented. Key opportunities demonstrated are innovative teaching methods, customisation, automated grading, added instructional time, personalised learning, virtual assistants, and increased enrolment, among others. </P>

<P>The challenges for the use of AI in HE followed next which highlighted some of the following: Academic integrity, generation of automated answers, concerns regarding ethics, widening of the digital divide in developing countries, issues of privacy and bias, and a lack of connection between AI and theoretical frames of reference, to mention a few. </P>

<P>Lastly, a possible link between AI and QA factors was established. Quality assurance factors featured across both the cited challenges and opportunities. From this discussion it was highlighted that the opportunities AI presents link to QA factors and can improve QA factors. On the other hand, there was also a link between the challenges provided and QA factors. However, it came to light that for QA factors not to be compromised the challenges presented by AI must be carefully mitigated. </P>

<P>As universities continue to traverse the changing HE landscape and specifically the pervasive nature of AI in HE, there must be constant reflection about strategies to best leverage the influx of persistent change associated with the HE landscape. </P>
</Sect>

<Sect>
<H4>References </H4>

<P>Aldosari, SA. 2020. The future of higher education in the light of artificial intelligence transformations. International Journal of Higher Education
<Link> 9(3):145-151. https://doi.org/10.5430/ijhe.v9n3p145 </Link>
</P>

<P>Aluko, R., Krull, G., &amp; Mhlanga, E. 2022. Shaping open, distance and e-learning in post school education and training: A call for a revised agenda. Perspectives in Education
<Link> 40(1):1-17. https://doi. </Link>

<Link>org/10.18820/2519593X/pie.v40.i1.1 </Link>
</P>

<P>Bearman, M., Ryan, J., &amp; Ajjawi, R. 2023. Discourses of artificial intelligence in higher education: A critical literature review. Higher Education
<Link> 86:369-385. https://doi.org/10.1007/s10734</Link>

<Link>022-00937-2 </Link>
</P>

<P>Buchanan, BG. 2005. A (very) brief history of artificial intelligence. AI Magazine
<Link> 26(4):53-60. https://doi.org/10.1609/aimag.v26i4.1848 </Link>
</P>

<P>Chatterjee, S. &amp; Bhattacharjee, KK. 2020. Adoption of artificial intelligence in higher education: A quantitative analysis using structural equation modelling. Education and Information Technologies 
<Link>25:3443-3463. https://doi.org/10.1007/s10639-020-10159-7 </Link>
</P>

<P>Chen, L., Chen, P., &amp; Lin, Z. 2020. Artificial intelligence in education: A review. IEEE Access
<Link> 20:75264-75278. https://doi.org/10.1109/ </Link>

<Link>ACCESS.2020.2988510 </Link>
</P>

<P>Crompton, H., &amp; Burke, D. 2023. Artificial intelligence in higher education: The state of the field. International Journal of Educational Technology in Higher Education 20:
<Link>1-22. https://doi.org/10.1186/ </Link>

<Link>s41239-023-00392-8 </Link>
</P>

<P>Dhawan, S. &amp; Batra, G. 2020. Artificial intelligence in higher education: Promises, perils, and perspective. OJAZ Expanding Knowledge Horizon 11:11-22. </P>

<P>Grassini, S. 2023. Shaping the future of education: Exploring the potential and consequences of AI and ChatGPT in educational settings. Education Sciences
<Link> 13(7). 692. 13 pages. http://dx.doi.org/10.3390/ </Link>

<Link>educsci13070692 </Link>
</P>

<P>Huang, J., Saleh, S., &amp; Liu, Y. 2021. A review on artificial intelligence in education. Academic Journal of Interdisciplinary Studies 10(3):206
<Link>217. https://doi.org/10.36941/ajis-2021-0077 </Link>
</P>

<P>Hinojo-Lucena, FJ., Aznar-Diaz, I., Caceres-Reche, MP., &amp; Romero-Rodriguez, JM. 2019. Artificial intelligence in higher education: A bibliometric study on its impact in the scientific literature. Education Sciences
<Link> 9(1):2-9. https://doi.org/10.3390/ </Link>

<Link>educsci9010051 </Link>
</P>

<P>Hutson, J., Jeevanjee, T., Vander Graaf, V., Lively, J., Weber, J., Weir, G., Arnone, K., Carnes, G., Vosevich, K., Plate, D., &amp; Leary, M. 2022. Artificial intelligence and the disruption of higher education: Strategies for integrations across disciplines. Creative Education 
<Link>13(12):3953-3980. https://doi.org/10.4236/ce.2022.1312253 </Link>
</P>

<P>Ishak, NA. &amp; Jiang, CX. 2022. Research on the influence of artificial intelligence technology on the formulation of educational strategies. International Journal of Intelligent Systems and Applications in Engineering 10(2s):70-75. </P>

<P>Jain, S. &amp; Jain, R. 2019. Role of artificial intelligence in higher education – an empirical investigation. International Journal of Research and Analytical Views 6(2):144-150. </P>

<P>Joshi, S., Rambola, RK., &amp; Churi, P. 2021. Evaluating artificial intelligence in education for next generation. Journal of Physics: Conference Series
<Link>. 1714 012039. 13 pages. https://doi.org/10.1088/1742</Link>

<Link>6596/1714/1/012039 </Link>
</P>

<P>Kaul, V., Enslin, S., &amp; Gross, SA. 2020. History of artificial intelligence. Medicine Gastrointestinal Endoscopy
<Link> 92(4):807-812. https://doi. </Link>

<Link>org/10.1016/j.gie.2020.06.040 </Link>
</P>

<P>Kelly, S., Kaye, S-A., &amp; Oviedo-Trespalacios, O. 2023. What factors contribute to the acceptance of artificial intelligence? A systematic review. Telematics and Informatics 77. 101925. 33 pages. 
<Link>https://doi.org/10.1016/j.tele.2022.101925 </Link>
</P>

<P>
<Link>Kengam, J. 2020. Artificial intelligence in education. Available at: https:// </Link>

<Link>www.researchgate.net/publication/347448363_ARTIFICIAL_ </Link>

<Link>INTELLIGENCE_IN_EDUCATION</Link>
. (Accessed on 23 January 2024). </P>

<P>King, MR. 2023. A conversation on artificial intelligence, chatbots, and plagiarism in higher education. Cellular and Molecular Bioengineering
<Link> 16:1-2. https://doi.org/10.1007/s12195-022</Link>

<Link>00754-8 </Link>
</P>

<P>Kuleto, V., Ilić, M., Dumangiu, M., Ranković, M., Martins, OMD., Păun, D., &amp; Mihoreanu, L. 2021. Exploring opportunities and challenges of artificial intelligence and machine learning in higher education institutions. Sustainability
<Link> 13(18). 10424. 16 pages. https://doi. </Link>

<Link>org/10.3390/su131810424 </Link>
</P>

<P>Mohee, R. 2019. Quality assurance: Good practices in ODL in Sub-Saharan 
<Link>Africa. Available at: http://hdl.handle.net/11599/3132. (Accessed </Link>
on 14 April 2023). </P>

<P>Ocana-Fernandez, Y., Valenzuel-Fernandez, L., &amp; Garro-Aburto, L. 2019. Artificial intelligence and its implications in higher education. Propitos y Representaciones
<Link> 7(2):536-568. https://doi. </Link>

<Link>org/10.20511/pyr2019.v7n2.274 </Link>
</P>

<P>Ouyang, F., Zheng, L., &amp; Jiao, P. 2022. Artificial intelligence in online higher education: A systematic review of empirical research from 2011 to 2020. Education and Information Technologies 27:7893
<Link>7925. https://doi.org/10.1007/s10639-022-10925-9 </Link>
</P>

<P>Pelletier, K., Robert, J., Muscanell, N., McCormack, M., Reeves, J., Arbino, N., &amp; Grajek, S. 2023. EDUCAUSE Horizon Report: Teaching and learning edition. Boulder: EDUCAUSE. </P>

<P>Stanislav, I. 2023. The dark side of artificial intelligence in higher education. The Service Industries Journal 43(15/16):1055-1082. 
<Link>https://doi.org/10.1080/02642069.2023.2258799 </Link>
</P>

<P>Zawacki-Richter, O., Marín, VI., Bond, M., &amp; Gouverneur, F. 2019. Systematic review of research on artificial intelligence applications in higher education – where are the educators? International Journal of Educational Technology in Higher Education 
<Link>16(1):1-27. https://doi.org/10.1186/s41239-019-0171-0 </Link>
</P>

<Link><Figure id="LinkTarget_10757">

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_30.jpg"/>
</Figure>
</Link>
</Sect>
</Sect>
</Sect>

<Sect>
<H2>Chapter 4 </H2>

<P>Artificial Intelligence and the Ethics of Tomorrow: Tracing the Shift in </P>

<Sect>
<H3>Information Ethics through History </H3>

<P>Brenda van Wyk </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_31.jpg"/>
</Figure>
</Link>

<Link>
<P>Department of Information Science University of Pretoria Pretoria, South Africa </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_32.jpg"/>
</Figure>
</Link>

<P>Marlene Holmner </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_33.jpg"/>
</Figure>
</Link>

<Link>
<P>Department of Information Science University of Pretoria Pretoria, South Africa </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_34.jpg"/>
</Figure>
</Link>

<Sect>
<H4>Introduction </H4>

<P>The emergence and ongoing development of the information era have given rise to various ethical dilemmas that are inherent to the essence of information. IE (information ethics) is an interdisciplinary area that incorporates disciplines such as philosophy, computer science, sociology, law, and others, as highlighted by scholars like Quin (2011) and Floridi (2019). Although the information age is frequently regarded as a contemporary occurrence, this perspective fails to acknowledge its underlying historical origins. The origins of this age can be attributed to previous advancements in communication and ITs (information technologies). The information age is currently recognised as a dynamic and ongoing era characterised by swift technological developments that are continuously transforming various aspects of society, such as communication, education, healthcare, and entertainment. </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_35.jpg"/>
</Figure>
</Link>

<P>IE is a discipline that combines ethical theories with practical considerations about technology. It acknowledges the intricate and diverse character of ethical dilemmas in the digital world. The subject matter encompasses inquiries on data privacy, cybersecurity, digital rights, and the ethical utilisation of AI (artificial intelligence) and ML (machine learning). With the advancement and widespread use of technology, ethical questions become more complex and important. </P>

<P>Furthermore, IE is dynamic, constantly adjusting to address novel issues and inquiries presented by evolving technologies. For example, the emergence of social media platforms has ignited issues on misinformation, digital identity, and online conduct, while progress in AI and robotics is stimulating conversations about machine ethics, job displacement, and the prospects of human-machine interactions in the future. </P>

<P>As we traverse this changing environment, it is crucial to comprehend and tackle the ethical consequences of our digital existence. In this chapter the findings of a review of recent literature allude to the need to address issues of the privacy and security issues related to data collection, digital identity and authenticity, accessibility and digital inclusivity, legal frameworks, surveillance, and the freedom of expression. The study delves into how AI innovation influences new nuances in IE. The ultimate goal is to establish a world that is not only technologically sophisticated but also firmly rooted in ethical values. </P>
</Sect>

<Sect>
<H4>Contribution of Information Science and Informatics to Information </H4>

<Sect>
<H5>Ethics </H5>

<P>IE draws upon diverse disciplinary insights and historical views to navigate the current revolutionary era. Its aim is to ensure that technical advancements are in harmony with our shared values and societal objectives. Within this environment, it is of utmost importance to thoroughly examine and analyse ethical concepts and frameworks. This examination plays a vital role in determining the course of action for creating a future where technology is utilised to benefit humanity’s highest ideals. </P>

<P>With its early beginnings in 1937, ASIS (the American Society for Information Science – later renamed to the American Documentation Institute) explored aspects of information and information behaviour (Alharbi &amp; Mukhari 2023:38). Two decades later Borko defined information science as the discipline exploring what information and information seeking behaviour are, as well as the flow of information as it is processed for accessibility and use (Borko 1968). However, the person who coined the term information ethics was Robert Hauptman (Froehlich 2000:264). </P>

<P>Information science is concerned with that body of knowledge relating to the origination, collection, organisation, storage, retrieval, interpretation, transmission, transformation, and utilisation of information. Theorists agree on the interdisciplinarity of information science (Wilson 1997; Stahl 2008; Bates 2005). It has both a pure science component, which enquires into the subject without regard to its application, and an applied science component, which develops services and products. </P>
</Sect>
</Sect>

<Sect>
<H4>What is Information Ethics? </H4>

<P>IE, in its simplest form, refers to the use of information in a way that does not cause harm. As technology evolves, new ethical challenges and dilemmas arise (Moor 1985; 2020; Burgess &amp; Knox 2019). As a subset of information science and being part of applied ethics, IE examines ethical issues associated with the creation, dissemination, and use of information. It is particularly relevant to the informatics discipline that deals with the management and analysis of information. </P>

<P>On a macro level Zhou, Chen, Berry, Reed, Zhang, and Savage (2020:3010) share that ethics as a branch of philosophy refers to the systematising, defending, and recommending concepts of right and wrong conduct, where rights, obligations, benefits, and fairness principles are explained and deliberated in models and frameworks. </P>

<P>The foundations of IE are instituted in its key principles and constructs. Figure 4.1 offers a summary of the overlapping constructs included in theories and research that will be discussed in greater detail in this chapter. </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_36.jpg"/>

<Caption>
<P>Figure 4.1: Summary of IE constructs. (Source: Personal archive) </P>
</Caption>
</Figure>

<P>These foundational principles guide the development of ethical frameworks, policies, and practices in the rapidly evolving landscape of IT and data management. Adhering to these principles helps to ensure that information is used responsibly, ethically, and in ways that benefit individuals and society. IE is at the heart of AI and the ethics of AI (Bester &amp; Fisher 2020:2). These ethics are underpinned by the philosophy of information. </P>
</Sect>

<Sect>
<H4>In the Beginning: The Philosophy of Information and Information Ethics </H4>

<P>PI (the philosophy of information) encompasses aspects of philosophy, computer science, information science, cognitive science, and communication studies. It addresses fundamental questions related to the nature of information, its role in the universe, and its impact on knowledge, reality, and society (Bynum 2010:420). As an interdisciplinary field, it explores the nature, properties, and implications of information. Floridi (1999:41) defines PI as a field of philosophy, more specifically looking at critical research into the basic principles of information and the conceptual nature thereof. This field includes the dynamics of information, its uses and applications theory, and computational methodologies (Floridi 2013:38; 2015:42). IE, in turn, is a branch of PI. New developments, such as AI and Gen-AI (generative artificial intelligence), have a profound impact on moral decision-making in our daily lives. </P>

<P>Moral tenets and theories can be complex, and when revisiting the discourses of seminal authors it lays a foundation to meaningfully expand on the ever increasing moral and ethical issues of AI. Floridi (2015:197) stresses the importance to find a balance in practices and experiences in daily lives and uses the metaphor of a tree depending on its roots for the sustainable growth of new branches. Seminal theorists of IE are particularly found in mathematics (Shannon 1948), informatics (Turing 1950), and information science (cf. Mason 1986; Capurro, Eldred, &amp; Nagel 2013). </P>
</Sect>

<Sect>
<H4>Finding the Roots of Information Ethics </H4>

<P>On the challenges encountered when applying human-centred philosophy of morals and ethics in ICTs (information and communication technologies), Birrer (1999:16) laments: ‘It is by no means accidental that most of the discussion about values in technology focuses on normative components that are considered undesirable: Technocracy, dehumanization, inequality, manipulation, loss of control.’ Values and norms are humanistic and not technological constructs (Moor 2006:18). As such, applying moral and ethical theories to information and ICTs is complex. </P>

<P>Expanding upon the first ideas about the essence and intricacies of AI, we arrived at the overarching question: Can computers, like humans, be held responsible for their acts? One of the notable futurists and mathematicians was Alan Turing. In the early fifties of the previous century, he was a pioneering mathematician and computer scientist who introduced the Turing Test to assess the intelligence of machines. In 1950, his paper, Computing machinery and intelligence (Turing 1950), posed and addressed the question of whether computers can ‘think.’ The Turing Test involves a human judge engaging in natural language conversations with both a human and a machine, without knowing which is which. If the judge cannot reliably distinguish between the human and the machine based on the conversation, then, according to Turing, we could say that the machine is demonstrating a form of intelligence indistinguishable from that of a human. </P>

<P>The Turing Test remains a widely discussed concept in AI and philosophy, and it has sparked debates about the nature of consciousness, intelligence, and the potential capabilities of machines. The Turing Test is once again in the limelight, this time on the development of LLMs (large language models) used in Gen-AI. </P>
</Sect>

<Sect>
<H4>Early Philosophers and Pioneers of the Information Ethics Theory </H4>

<P>One of the first and influential models in IE is associated with the work of Norbert Wiener. In 1948, Wiener, a mathematician and philosopher pioneered research in cybernetics, a precursor to IE. His work laid the foundation for discussions on the ethical implications of ICT. While Wiener’s work may not fit the contemporary definition of a comprehensive IE model, it has set the stage for ethical reflections on the impact of technology on individuals and society, addressing issues such as control, communication, and the responsibilities associated with the use of information in automated systems. </P>
</Sect>

<Sect>
<H4>IE Models and Frameworks </H4>

<P>Prominent frameworks that have been informing IE research, teachings, and practices include, among others, the 2013 privacy framework by the OECD (Organisation for Economic Cooperation and Development) FIP (fair information practice) (OECD 2013), Floridi’s IE frameworks, the PAPA (privacy, accuracy, property, and accessibility) framework, and Tavani’s informational privacy framework. This chapter provides an overview of IE models and frameworks and discusses whether the capability of existing frameworks is comprehensive enough to provide the ethical guardrails needed to accommodate the ethical considerations prevalent in a fast-changing world. IE typically considers examining the ethical implications and considerations associated with disruptive innovations around the creation, use, and management of information within virtual and immersive environments. The question now is to what extent existing frameworks are still relevant to underpin the vast new developments. </P>

<P>While the frameworks mentioned above provide a useful foundation for evaluating ethical issues in technology, including aspects of Gen-AI, there may be potential shortcomings requiring further adaptation to address the unique challenges posed by new developments. Han (2022:1 of 11) reports that the oldest and most referred to terms applied to the ethical use of information in a digital environment are ‘computer’ and ‘IE.’ The finding of this study is that new aspects must be considered, such as cross-cultural aspects on a global scale, devising guidelines for content moderation, ensuring that AI systems are ethical and avoid discriminatory practices, and lastly, considering environmental impact ethics. The conclusion is that one should take the wisdom of existing frames and build a comprehensive, adaptable multidisciplinary framework on it to address ethical dilemmas presented by evolving technological advancements and changing societal norms. Such a framework should encourage responsible innovation by providing the ethical guardrails, and simultaneously safeguarding the rights, wellbeing, and dignity of individuals in virtual spaces. </P>

<Sect>
<H5>Floridi’s Information Ethics Frameworks </H5>

<P>Luciano Floridi is a contemporary philosopher of information who served as UNESCO (United Nations Educational, Scientific, and Cultural Organisation) chair in information and computer ethics. His framework considers the ethical implications of living in an information society. He coined the term ‘infosphere,’ analogous to the biosphere, which denotes the entire informational environment constituted by all informational entities, interactions, processes, and mutual relations (Floridi 2013:132). </P>

<P>Floridi (2016:3; 2018:2) argues that the current information society can be described as a ‘mature information society,’ arguing that it developed through the first- and second-order technologies and ICTs. The term ‘information society’ refers to a society where the creation, distribution, and manipulation of information play a significant and pervasive role in economic, social, cultural, and political activities (Floridi 2018:4). He emphasises the ethical importance of treating information ethically and respecting the rights and interests of individuals and communities in the information age. He states that the rapid development in ICTs created a new information environment, requiring a suitable ethics framework to address unprecedented challenges in the environment, where a new understanding must be created between artificial and real environments (Floridi 2010:219). </P>

<P>Floridi’s continuing research into information creation and its ethical use with AI and AI systems positions information science and IE ethics in AI ethics research towards comprehensive models and theories in a changing field of study (Floridi 2018; 2019). </P>
</Sect>

<Sect>
<H5>Mason and the PAPA Model </H5>

<P>In 1986, Richard Mason published a social framework for addressing the major ethical issues of the information age in his pivotal article, Four ethical issues of the information age (Mason 1986:9). This framework consists of four broad categories of ethical issues namely privacy, accuracy, property, and accessibility, hence PAPA. This PAPA framework is still relatively germane in studying the ethical issues in IT (Woodward, Imboden, &amp; Martin 2011:64). Due to the increasing prevalence of digital data and the inherent dangers connected with its storage and transfer, security becomes a crucial ethical concern in the information age. It is the responsibility of information security to protect data integrity, confidentiality, and accessibility, immediately affecting ethical concepts like privacy, trust, and accountability. </P>

<P>When considering the possible harm caused by data breaches, such as identity theft, financial loss, or privacy invasion, the ethical implications of security become clear. These violations can damage the public’s trust and violate people’s rights (Solove 2006:478). Therefore, it is the responsibility of information professionals to safeguard the integrity of the digital ecosystem, protect stakeholders, and follow ethical standards in their security practices. It has thus become vital to add an ‘S’ to the PAPA acronym. </P>

<Table>
<Caption>
<P>Table 4.1: The PAPAS model (Adapted from Mason 1986; Young, Smith, &amp; Zheng 2020) </P>
</Caption>

<TR>
<TH>Privacy • The expansion of ICTs has resulted in the collection and use of personal information on a huge scale. • This raises ethical considerations about who has access to and uses this information, as well as concerns about the possibility of surveillance and manipulation. • Categories of private information: Private communication, privacy of the body, personal information, and information about one’s possessions. </TH>

<TH>Accuracy • Data integrity becomes increasingly important, as massive databases grow more interconnected. • Technology has made it easy for false or misleading information to spread quickly and widely, which can have serious consequences for individuals and society. • How to combat misinformation and disinformation. • How to ensure accurate and reliable access. </TH>
</TR>
</Table>

<Table>
<TR>
<TD>Property </TD>

<TD>Accessibility </TD>
</TR>

<TR>
<TD>• The internet and digital </TD>

<TD>• ICTs increase access to </TD>
</TR>

<TR>
<TD>technologies have made it </TD>

<TD>information, but equality in </TD>
</TR>

<TR>
<TD>easy to share and distribute </TD>

<TD>access is not yet achieved and not </TD>
</TR>

<TR>
<TD>information, including </TD>

<TD>everybody can equally participate </TD>
</TR>

<TR>
<TD>copyrighted material. </TD>

<TD>in society. </TD>
</TR>

<TR>
<TD>• The free flow of information is </TD>

<TD>• Expanded literacy and reasoning </TD>
</TR>

<TR>
<TD>a risk to intellectual property </TD>

<TD>skills are essential for one’s </TD>
</TR>

<TR>
<TD>and rights of creators and </TD>

<TD>participation in the growth of any </TD>
</TR>

<TR>
<TD>owners of this material. </TD>

<TD>society (Mason 1986). </TD>
</TR>

<TR>
<TD>• IP (intellectual property) </TD>

<TD>• Access to the essential </TD>
</TR>

<TR>
<TD>refers to the ownership </TD>

<TD>technologies is required, while </TD>
</TR>

<TR>
<TD>of ideas and creative </TD>

<TD>information must be available to </TD>
</TR>

<TR>
<TD>works, including patents, </TD>

<TD>be used and consumed. </TD>
</TR>

<TR>
<TD>trademarks, and copyright. </TD>

<TD/>
</TR>

<TR>
<TD>• Each of these instruments of </TD>

<TD/>
</TR>

<TR>
<TD>protection is governed by a set </TD>

<TD/>
</TR>

<TR>
<TD>of laws and regulations. </TD>

<TD/>
</TR>

<TR>
<TD>• Some of these rules are not </TD>

<TD/>
</TR>

<TR>
<TD>universal and they only apply </TD>

<TD/>
</TR>

<TR>
<TD>in specific countries. </TD>

<TD/>
</TR>

<TR>
<TD>Security • Security involves such a broad variety of concerns in the context of the information age – from data breaches to cyberattacks. Information security in its most basic form aims to protect the availability, confidentiality, and integrity of data. </TD>

<TD/>
</TR>
</Table>

<P>AI and other emerging technologies present new vulnerabilities and the possibility of abuse, greatly complicating the security picture. </P>
</Sect>

<Sect>
<H5>Tavani’s Informational Privacy Framework </H5>

<P>Herman Tavani, a scholar known for his work in computer ethics and IE, wrote a book, Ethics and technology: Controversies, questions, and strategies for ethical computing, which is widely used in the field of computers and IE. Tavani has explored ethical issues related to IT, including privacy concerns. He has identified three types of privacy: </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Mental or psychological privacy; </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>physical privacy, also referring to access privacy; and </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>decisional and informational privacy (Tavani 1999:138-140). </LBody>
</LI>
</L>

<P>In the context of informational privacy, the concept of informed consent is crucial. Users should be aware of how their information is being collected, processed, and shared, and they should have the ability to make informed decisions about whether to share their information. Tavani emphasises the principle of data minimisation, which suggests that organisations should only collect and retain the minimum amount of information necessary for a specific purpose. Frameworks addressing informational privacy often highlight the importance of giving individuals control over their personal information. This includes mechanisms for individuals to access, correct, and delete their data. </P>

<P>Transparency in data practices and accountability for how organisations handle information are key elements. Tavani discusses how organisations should be transparent about their data practices and accountable for any misuse of information (Tavani 1999:137). Tavani’s work on privacy adds value to the study of the ethics of AI, as privacy on many levels is still a concern in the application of AI algorithms. </P>
</Sect>

<Sect>
<H5>Charles Ess </H5>

<P>Ess is a philosopher and scholar who has made significant contributions to the field of media ethics and IE, particularly in the context of computer and internet ethics. He studied the ethical challenges stemming from privacy, identity, digital communities, and social media platforms. He is particularly concerned with feminist ethics where gender-related ethical dilemmas such as harassment and bullying manifest in gender roles in cyberspace. </P>

<P>Ess advocates for the recognition of diverse cultural values in a global context. He has contributed to the theoretical foundations of IE (Ess 2008). His contributions extend to the development of global IE frameworks, emphasising the need for a global dialogue on ethical issues related to ICTs, considering diverse cultural, social, and political contexts (Ess 2014). </P>

<P>Ess has explored ethical implications associated with emerging technologies, including AI and robotics. His work addresses issues such as accountability, transparency, and the societal impact of intelligent systems. </P>
</Sect>

<Sect>
<H5>Raphael Capurro </H5>

<P>Capurro is a philosopher and information scientist who has made significant contributions to the field of IE. In 1999 he established the ICIE (International Center for Information Ethics), and currently the centre has a diverse membership covering the fields of informatics, information science, computer science, and more (Froehlich 2000). Capurro has contributed theoretical insights and frameworks that have influenced digital ontologies. His frameworks focus on freedom, privacy, and identity in an online and cyberworld (Capurro et al. 2013:12). Capurro warns that the tendency to see everything within the context of a Western philosophy must be challenged and different cultural notions must be considered. </P>

<P>One notable contribution by Capurro is the concept of ‘hermeneutical ethics of information.’ Hermeneutics, in a broad sense, refers to the interpretation of meaning. Capurro emphasises the importance of understanding information within a cultural and historical context, considering the multiple layers of interpretation that can be applied to information. This perspective recognises the ethical implications of interpretation, representation, and communication in the information domain. </P>

<P>Understandably, the field of IE is multidisciplinary, resulting in various frameworks and models. The scholars discussed above do not constitute an exhaustive list, as many others also contributed to the understanding of ethical issues in IT and communication. The frameworks developed over several decades and paved the way for new frameworks and discourse on the ethics of AI, focussing on the responsible use of AI and Gen-AI. </P>
</Sect>
</Sect>

<Sect>
<H4>The Ethics of AI </H4>

<P>The recent hype around AI and Gen-AI created the illusion that AI is the ultimate panacea (Floridi 2020:2). Views range from ultimate doom to a hailing of AI as the beginning of everything. This is not realistic, and authors such as Floridi (2019; 2020) as well as Sartori and Theodorou (2022:10 of 11) remind us that during the many decades of the becoming of AI, there were many wins and losses. What cannot be denied is the rapid tempo of recent developments of LLMs and prompt engineering. The sociotechnical nature of AI and Gen-AI remains uncontested, where the development, deployment, and impact of AI are deeply intertwined with societal factors, human behaviour, and ethical considerations. </P>
</Sect>

<Sect>
<H4>The Nature of AI and Gen-AI </H4>

<P>AI and AI ethics are not new fields of research and study. Floridi (2020:3), as well as Sartori and Theodorou (2022:2 of 11) point to the many ‘winters’ and ‘summers’ that AI has already endured. In 1956, a group of mathematicians and computer scientists met at the Dartmouth College, New Hampshire, to discuss the emerging field of AI (Strickland 2021). It was here that John McCarthy coined the term ‘artificial intelligence,’ which he argued, ‘would explore the hypothesis that every aspect of learning or any other feature of intelligence can in principle be so precisely described that a machine can be made to simulate it’ (Strickland 2021:27). Bartneck, Lütge, Wagner, and Welsh (2020:18) note that a complete definition of AI is still in the making, but they conceptualise AI as the simulation of human intelligence processes by machines, especially computer systems. Specific applications of AI include 
<Link>expert systems</Link>
, 
<Link>natural language processing</Link>
, speech recognition, and machine vision. </P>

<P>The emergence of Gen-AI technologies expanded significantly on existing AI technologies, and opened numerous opportunities as well as future challenges. As part of ML (machine learning), Gen-AI uses algorithms that depend on large datasets. Current Gen-AI models and tools can produce original material, such as text, images, music, and even complex designs, based on their training data. Floridi (2024) posits that these innovations drastically change the supply and demand of information. </P>

<Sect>
<H5>The AI Life Cycle </H5>

<P>To understand AI ethics, the life cycle of AI must be considered. Its phases should not be regarded in isolation, as Ng, Kapur, Blizinsky, &amp; Hernandez-Boussard (2022:2248) claim that the life cycle must be examined with due cognisance of the phases’ impact in concert and the interactions between the phases. </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_37.jpg"/>

<Caption>
<P>Figure 4.2: The AI life cycle. (Source: Ng et al. 2022:2247) </P>
</Caption>
</Figure>
</Sect>

<Sect>
<H5>The AI Boom </H5>

<P>The remarkable expansion and advancement observed in the field of AI is commonly referred to as the ‘AI boom.’ The swift progress in AI technology heightened corporate pursuits, and the expanded utilisation of computer capabilities in industry have contributed to enhanced customer experiences. Consequently, there has been a surge in funding towards AI technologies and the incorporation of AI solutions into various goods and services. It is important to acknowledge that the AI industry’s rapid growth is accompanied by various obstacles and factors to consider, such as ethical dilemmas, the requirement for responsible AI advancement, and the resolution of potential biases within AI systems. Furthermore, like every technological advancement, there could be phases of heightened enthusiasm followed by adaptations and development in the industry. The rate of AI progress and its influence on society will probably remain a topic of debate and exploration. </P>
</Sect>
</Sect>

<Sect>
<H4>AI Ethics </H4>

<P>Essentially, AI ethics is a system of moral principles and techniques intended to inform the development and responsible use of AI technology (Bartneck et al. 2020:2247). Han (2022:2 of 11) reminds us that there is no clear and agreed definition of AI, as found in other fields of ethics such as computer ethics and IE. In literature there are overlaps in terminology and often AI ethics and robotic ethics are used interchangeably (cf. Figure 4.3). As AI has become integral to product platforms that incorporate ethical considerations and values in the development and deployment of AI and services, organisations are starting to develop AI codes of ethics. Han (2022:3 of 11) refers to Moor (1985:266) who defines computer ethics as an analysis of the nature and social impact of computer technology and the corresponding formulation and justification of policies for the ethical use of such technology. Hauptman, who was the first to use the term ‘IE’ in 1988, already addressed principles of censorship, privacy, access to information, balance in collection development, copyright, fair use, and codes of ethics (Froehlich 2000:265). </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_38.jpg"/>

<Caption>
<P>Figure 4.3: The multidisciplinary field of AI ethics. (Source: Han 2022:5 of 11) </P>
</Caption>
</Figure>

<P>In this model, cyborg – short for cyborg organism – and cyborg ethics refer to a combination of biological and artificial components. These enhanced capabilities are often present in biomedical technologies such as pacemakers, implants, prosthetics, or other technological enhancements that merge with the biological components of the organism. Han’s model provides a contemporary view of the span and complexity of new IE. </P>

<P>An AI code of ethics, also called an AI value platform, is a policy statement that formally defines the role of AI as it applies to the development and wellbeing of humans. This term should not be confused with AI value-driven platforms, looking at capabilities and benefits of systems. The purpose of an AI code of ethics is to provide stakeholders with guidance when faced with an ethical decision regarding the use of AI (Ibiricu &amp; Van der Made 2020:396). An AI value platform is generally understood as technology purposed to add value via AI. This could involve a combination of software, hardware, and services designed to provide solutions that address specific business challenges or opportunities. </P>

<P>It is important to note that the interpretation of the AI value platform may vary depending on the context and the specific goals of the entity using or providing such a platform. Organisations developing AI solutions are often looking for platforms that not only facilitate the technical aspects of AI but also deliver measurable values in terms of efficiency, innovation, and competitive advantage. For the latest and most accurate information, it is recommended to check specific industry announcements, publications, or product documentation. </P>

<P>It is quite incidental how Asimov’s science fiction short stories around 1950 referred to an ethical code where the primary law prohibits robots from engaging in any actions that cause harm to humans or from neglecting to act when harm could be prevented (Asimov 1984:9). The second law mandates that robots must comply with human commands, except when those commands conflict with the first law. The third law mandates that robots prioritise self-preservation, if it aligns with the principles outlined in the previous two laws. </P>
</Sect>

<Sect>
<H4>AI Ethics Frameworks and Principles </H4>

<P>AI ethics must consider the ethical implications and societal impacts of AI technologies. Building on existing IE frameworks, new theoretical frameworks have been proposed to guide discussions and decision-making in this domain. These include the IEEE (Institute of Electrical and Electronics Engineers) global initiative on ethics of autonomous and intelligent systems and the COMEST (Commission mondiale d’éthique des connaissances scientifiques et des technologies) rights-based model: The universal declaration on ethical considerations regarding artificial intelligence and autonomous systems. </P>

<Sect>
<H5>The Asilomar Principles </H5>

<P>In 2017, a high-level conference, the Future of Life conference, was held in California (Morandín-Ahuerma 2023:6), resulting in the Asilomar AI principles: 23 guidelines for the research and development of AI that outline developmental issues, ethics, and guidelines for the development of AI, with the goal of guiding the development of beneficial AI. </P>

<P>The 23 principles were developed by a group of AI researchers, roboticists, technology experts, and legal scholars from different universities and organisations. These experts organised the AI principles at the Asilomar conference while discussing the future of AI and responsible AI regulation. While these principles are not legally binding, they serve as a reference point for researchers, policymakers, and industry stakeholders. </P>

<P>At the forefront of the discussions to arrive at the principles was the agreement that ethics must guide all AI research to ensure privacy and fairness. Furthermore, AI research should not result in undirected knowledge. There was an agreement that ethical AI rests on cross-discipline cooperation. In creating a transdisciplinary AI research culture, trust and transparency will be inculcated. </P>

<P>Even though the conference was attended by high-profile entrepreneurs like Elon Musk, the decision was that data and information security must take preference over competitiveness (Morandín-Ahuerma 2023:6). Over and above being transparent about failures, judicial transparency is important where AI is used in decision-making and any legal matters such as sentencing and accountability of the use of AI in legal systems. The risk of autonomous AI systems was realised, and the conference agreed that there should always be an alignment with human values and the dichotomy of freedom versus privacy in terms of the use of private data, and that regulations should be such that individuals are not unduly restricted in their access. The conference and subsequent decisions stressed the importance of human control. The decision to delegate these types of autonomous tasks must reside with humans. The conference addressed the future capabilities of AI, agreeing that these should be regulated and that the future of the planet should be the guiding principle. </P>

<P>A critically important discussion at the conference was about AI and warfare. It was emphasised that human life must be valued and that AI armoury should be designed with great care. This must be done with international cooperation and transparency. The conference gave due consideration to the dangers of AI, and the importance to be vigilant and plan for possible risks. In conclusion, the main agreement was that AI must be developed and used for the greater good. </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_39.jpg"/>

<Caption>
<P>Figure 4.4: Principles of beneficial AI. (Source: Adapted from Morandín-Ahuerma 2023:20-24) </P>
</Caption>
</Figure>

<P>It is worth noting that these principles represent a consensus among the participants of the Asilomar conference at the time, as the discussions on AI ethics and guidelines have continued to evolve since then. Various organisations and initiatives around the world continue to contribute to the development of ethical frameworks for AI. In addition to the principles mentioned in Figure 4.4, trust and fairness in AI, AI and social biases, and social justice and cultural sensitivity are elements of ethical importance. </P>
</Sect>

<Sect>
<H5>Trust and Fairness in AI </H5>

<P>When discussing AI ethics, the topic of algorithm ethics invariably emerges. Algorithms are knowingly or unknowingly part of many aspects of life (Tsamados, Aggarwal, Cowls, Morley, Roberts, Taddeo, &amp; Floridi 2021). Think of the recommender systems of streaming services or online purchases, all based on algorithms. Benk, Tolmeijer, Von Wangenheim, &amp; Ferrario (2022:2 of 12) underscore the importance of creating trust in AI-based systems towards improved ethical and effective application and use. They continue to lament that the paucity, in the literature, of a shared understanding and measurement of what trust within the context of AI means has negatively impacted progress in the field (Benk et al. 2022:2 of 12). Decision-making algorithms are increasingly being used in industry, of which legal decisions and healthcare are at the forefront (Vaassen 2021:1 of 20). A growing number of authors (cf. e.g., Burrell 2016:3 of 12; Benk et al. 2022:2 of 12) refer to the black box concept to explain the lack of transparency and interpretability in certain advanced machine learning models, especially with deep neural networks. In this context, the term ‘opacity’ indicates that there are instances when the outcomes of an algorithm and how it was derived are not clear (Vaassen 2021:2 of 20). The opacity of black-box algorithms, particularly in the context of ML models, can give rise to various problems and challenges (cf. Figure 4.5). </P>

<P>It is furthermore possible that bias can transpire which will influence decisions (Burrell 2016:2 of 12; Vaassen 2021:3 of 20). Establishing accountability and responsibility for algorithmic outcomes becomes challenging, which may have legal and ethical implications. </P>

<P>Black-box algorithms may inadvertently learn and perpetuate biases present in the training data. If unchecked, discriminatory outcomes may occur, reinforcing or exacerbating existing societal biases. Users may be reluctant to rely on or accept algorithmic decisions if they cannot understand or validate the decision-making process. </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_40.jpg"/>

<Caption>
<P>Figure 4.5: Opacity in black box algorithms. (Source: Based on Burrell 2016) </P>
</Caption>
</Figure>

<P>It stands to reason that areas with critical societal impact such as financial, health, and legal aspects require firm regulatory frameworks and call for more clarity on explainability, transparency, and trustworthiness of AI applications. </P>

<P>Algorithm Ethics </P>

<P>With obvious tangents and overlapping with AI ethics, algorithm ethics refers to the ethical considerations associated with the design, development, deployment, and impact of algorithms. Algorithms are sets of instructions or rules followed by a computer programme to perform a specific task or solve a particular problem. Algorithms can inadvertently perpetuate biases present in training data, leading to unfair or discriminatory outcomes (Vaassen 2021:5 of 20). Ethical algorithm design aims to minimise bias, ensure fairness, and prevent discrimination, especially in areas like hiring, lending, and law enforcement. </P>

<P>Whereas algorithms process sensitive personal information, it raises concerns about privacy infringements. Ethical algorithm development involves implementing privacy-preserving measures to protect individuals’ data and ensuring compliance with data protection regulations. Transparent and explainable algorithms enhance accountability, trust, and understanding, allowing users to comprehend and channel decisions. Identifying the responsibility for algorithmic outcomes can be challenging, especially in complex systems. Ethical algorithms include mechanisms for accountability, making it clear who is responsible for the decisions made by the algorithm and providing avenues for redress in case of errors. Algorithms may be vulnerable to malicious attacks or manipulation. Ethical algorithms involve transparent communication with users, ensuring they understand how algorithms impact them and giving them control over their data and preferences. Ethical algorithms prioritise inclusivity and accessibility, aiming to minimise biases and ensure equitable outcomes for diverse populations. Ethical algorithms include mechanisms for continuous monitoring, evaluation, and improvement to ensure their ongoing effectiveness and fairness. </P>
</Sect>

<Sect>
<H5>AI Biases </H5>

<P>AI is prone to a range of biases, of which social bias is of particular concern. Friedman and Nissenbaum (1996:332) define three types of bias: Preexisting bias based on social practices and attitudes likely embedded in data used for ‘training;’ technical bias based on design constraints in hardware and software likely in AI to be associated with limits of available data and a lack of qualitative nuance; and emergent bias arising from changing the used context such that accurate meaning is lost or distorted when viewed through a different prism. The potential of bias to permeate at least some categories of AI applications is a significant concern (Niederman &amp; Baker 2022). Other AI biases include algorithm biases and data biases. </P>
</Sect>

<Sect>
<H5>AI, Social Justice, and Cultural Sensitivity </H5>

<P>AI ethics in the medical industry primarily focuses on the objective of guaranteeing impartiality and preventing biases in AI-powered decision-making. Given the varied patient demographics and the importance of fair treatment, this is of utmost importance. AI systems in healthcare necessitate careful design and ongoing monitoring to mitigate the impact of inherent biases in training data on diagnostic or treatment suggestions. This is also connected to the wider problem of digital exclusion, where marginalised or underrepresented groups may not have equitable access to AI-enhanced medical services, therefore worsening existing healthcare inequalities. </P>

<P>Within the realm of law, the utilisation of AI implies a distinct array of ethical dilemmas. AI systems have the power to aid in legal research, analyse documents, and even make predictions about case outcomes. However, they also give rise to significant concerns around transparency, accountability, and fairness in judicial proceedings. AI utilisation in sentencing or parole determinations needs meticulous regulation to guarantee that algorithmic suggestions do not perpetuate past prejudices or violate legal rights. </P>
</Sect>

<Sect>
<H5>Digital Exclusion </H5>

<P>Digital exclusion involves not only the lack of access to technology, but also the inability to comprehend and engage with digital systems. With the increasing integration of AI into daily life, there is a risk that certain groups may face a disproportionate disadvantage due to the digital divide, as they may lack the necessary resources or expertise to fully utilise the breakthroughs in AI. </P>

<P>Ethical intelligence in AI pertains to the creation of AI systems that not only exhibit efficient task performance but also adhere to ethical and moral principles. This task is especially difficult due to the subjective character of ethics and the intricate process of programming machines to comprehend human values. </P>
</Sect>
</Sect>

<Sect>
<H4>AI Risks </H4>

<P>In the dynamic world of technology, ethical risks of AI do not only feature in its use, but also in its design. Addressing challenges of biases in data, algorithms, as well as training set limitations leaves room for deeper discourse and ongoing efforts to improve data quality, enhance algorithmic fairness, ensure robustness, and consider ethical implications throughout the AI development lifecycle (cf. Figure 4.2). </P>

<P>AI could be prone to inaccurate information and misrepresentation. Some of these challenges include incidences of deep fake, identity theft, and privacy issues in using facial recognition, to name but a few. </P>

<Sect>
<H5>Deep Fake </H5>

<P>The utilisation of AI to generate authentic yet fabricated audio or video content, known as deepfake technology, poses a significant ethical quandary. Although it can be used for harmless purposes such as amusement, it also has the capacity to be misused for purposes such as disseminating false information, impersonating others, and infringing upon privacy and permission. </P>
</Sect>

<Sect>
<H5>AI and Facial Recognition </H5>

<P>Facial recognition technology, a controversial form of AI, has advantages in terms of security and customisation, but also gives rise to significant worries regarding privacy, monitoring, and the possibility of misuse. The ethical use of these technologies is further complicated by misidentification and prejudices, notably towards specific ethnic groups. </P>
</Sect>

<Sect>
<H5>Identity Theft </H5>

<P>Ahmed (2020:2) notes that identity theft is on the rise but warns that this crime is still ill-defined. In essence it entails the wrongful acquirement and utilisation of another person’s personal information, such as their name, social security number, credit card information, or other identifying details, without their permission. The goal of identity theft is typically to commit fraud, financial theft, or other criminal activities using the victim’s identity. Some of the methods include phishing, data breaches, dumpster diving, manipulative social engineering, and skimming of credit cards. Most identity crimes take place where people illegally obtain information by purchasing it from others skilled in accessing and using information online (Ahmed 2020:193). These crimes cause tremendous financial, emotional, and at times, reputational damage. </P>
</Sect>
</Sect>

<Sect>
<H4>AI Threats </H4>

<P>A dynamic invention like AI invariably poses some threats to society. Much of this lies in the misuse of AI, such as cyberwarfare, and there is also the overarching fear of loss of human control when autonomous systems move closer to super-intelligence. </P>

<Sect>
<H5>The Threat of Singularity </H5>

<P>The term ‘singularity’ was first coined by John von Neumann in 1948 (Ulam 1958). Singularity, as described by scholars such as Shanahan (2015), posits a future moment when AI systems attain a state of independent functioning as a result of the advent of machine awareness and superhuman intellect. This concept imagines a situation in which AI exceeds human cognitive capabilities, resulting in a revolutionary and perhaps unmanageable stage in technology development. </P>

<P>Singularity gives rise to significant ethical, intellectual, and existential inquiries. The main apprehension revolves around the possible relinquishment of human authority over technology. The inherent uncertainty of AI systems presents substantial hazards, such as the potential for destructive actions that may harm humans or the environment. The issue of whether the singularity is presently in progress is a subject of contention among technologists, ethicists, and futurists. The swift progress in AI technology implies that the possibility of singularity, although hypothetical, cannot be completely disregarded. </P>

<P>The concept of singularity has a complex and diverse impact on AI ethics and poses a challenge to our existing comprehension of ethical frameworks in a society where AI possesses autonomous decision-making abilities, as the task of assigning accountability for the consequences of those actions, whether they are advantageous or detrimental, becomes a multifaceted matter. </P>
</Sect>
</Sect>

<Sect>
<H4>Ethics of Gen-AI </H4>

<P>It should be noted that the technology of Gen-AI is not new. Gen-AI was introduced in the 1960s in chatbots. However, it was not until 2014, with the introduction of GANs (generative adversarial networks) – a type of ML algorithm – that Gen-AI could create convincingly authentic images, videos, and audio of real people. Gen-AI is a type of AI technology that can produce various types 
<Link>of content, including text, imagery, audio, and synthetic data. </Link>
Gen-AI refers to a class of AI systems that can generate new content, such as images, text, or even music, that is not explicitly programmed. These systems are designed to understand and learn patterns from existing data and then use that understanding to create entirely new data that resemble the training set. Bandi, Adapa, and Kuchi (2023:1 of 60) explain that Gen-AI is geared to develop algorithms able to create synthetic data. There are several types of Gen-AI models, and one of the most notable is the class of generative models based on neural networks. </P>

<P>Gen-AI’s ability to generate data resembling real-life conditions, allows for content creation (Bandi et al. 2023:1 of 60). It is a further development of Web 3.0. Gen-AI has found applications in various domains, including text generation in NLP (natural language processing) models, such as GPT 3 (generative pre-trained transformer 3) and GPT 4 and data augmentation of data training sets, and can generate coherent and contextually relevant text. These models can be used for tasks like writing articles and generating creative content. Then there is the image generation, where generative models can create realistic images that resemble photographs of human faces, animals, or scenes. </P>

<P>While Gen-AI has shown remarkable capabilities, there are also ethical concerns, especially regarding the potential misuse of the technology, such as the creation of deepfakes for malicious purposes. As a result, ongoing research and development in the field aim to address both the positive and negative implications of Gen-AI. </P>

<Sect>
<H5>Large Language Models and Gen-AI Ethics </H5>

<P>There are overlaps and tangents in ethical concerns in the use and applications of Gen-AI, but the nature of LLMs adds an element of further concern – that of bias and privacy. LLMs refer to advanced AI models that are specifically designed for NLP tasks. </P>

<P>The primary architecture used for LLMs is the transformer architecture, which is a type of neural network. The transformer architecture allows for parallelisation and efficient processing of sequences, making it well-suited for handling natural language. Transformer models apply an evolving set of mathematical techniques, called attention or self-attention, to detect subtle ways in which even distant data elements in a series influence and depend on each other. LLMs have analytical abilities, which can be used for chatbots and virtual agents. The transformative nature of large language models lies in their ability to fundamentally change how we interact with and leverage natural language in various fields, impacting communication, creativity, productivity, and research. These LLMs are often pre-trained on massive datasets and fine-tuned for specific tasks. The use of large-scale language models harbours several risks and ethical concerns (Weidinger, Mellor, Rauh, Griffin, Uesato, Huang, Cheng, Glaese, Balle, Kasirzadeh, Kenton, Brown, Hawkins, Stepleton, Biles, Birhane, Haas, Rimell, Hendricks, Isaac, Legassick, Irving, &amp; Gabriel 2021:7 of 64). Ethical concerns in the use of LLMs include misinformation and disinformation, perpetuating bias, a lack of explainability, the risk of manipulation, as well as privacy and security concerns. </P>
</Sect>

<Sect>
<H5>LLMs and Training Data </H5>

<P>An LLM is a statistical model that predicts the probability of a sequence of words. Based on DL (deep learning) – a branch of ML – LLMs import vast amounts of data, openly available from books, web pages, or similar contents (Paass &amp; Giesselbach 2023:21). Then patterns are identified to get connections between words. An LLM with a large feed of data can generate content better, faster, and more accurately, which then generates new content based on the prompts provided by the user. Training data in LLMs involve exposing the model to vast amounts of diverse text from a wide range of sources. </P>

<P>It is important to note that the success of LLMs like GPT 3 is attributed to their ability to generalise from diverse training data. The models can then generate coherent and contextually relevant text across a wide range of topics and tasks. The ethical considerations related to the use of such models, including biases in training data and potential societal impacts, are also important aspects to consider in the development and deployment of these models. </P>

<P>Table 4.2:Summary of Gen-AI ethical challenges (Adapted from Weidinger et al. 2021) </P>

<Table>
<TR>
<TD>LLM Ethical Concerns </TD>

<TD>LLM May Predict as Follows </TD>
</TR>

<TR>
<TD>Discrimination </TD>

<TD>Social stereotyping based on gender, religion, orientation, ability, and age </TD>
</TR>

<TR>
<TD>Toxicity and exclusion </TD>

<TD>Unjust, prejudiced, oppressive use of natural language in models </TD>
</TR>

<TR>
<TD>Harm caused by misinformation </TD>

<TD>False, misleading, poor-quality information </TD>
</TR>

<TR>
<TD>Intentional malicious use </TD>

<TD>Use LLM for illegal surveillance, fraud, scams, deepfake, identity theft, and censorship </TD>
</TR>

<TR>
<TD>Information hazards </TD>

<TD>LLM predictions include privacy and safety risks – leaking private information </TD>
</TR>

<TR>
<TD>Harm from human-computer interaction </TD>

<TD>Dependency, too trusting, psychological profile and vulnerability, ethnic profiling </TD>
</TR>

<TR>
<TD>Environmental harm </TD>

<TD>Economic disparity, energy, and water demands </TD>
</TR>

<TR>
<TD>Access </TD>

<TD>Disparate access, exclusion, literacy, and skills constraints </TD>
</TR>
</Table>

<P>LLMs like ChatGPT generate responses based on patterns learned from large datasets (training data), and at times they may generate content that appears to be contextually appropriate, although it is not factually correct. This is referred to as AI hallucination and is a growing ethics concern. While LLMs can be powerful tools for generating human-like text, they do not have a genuine understanding or awareness yet. </P>

<P>AI programmes, more specifically LLMs, have the potential to provide deceptive, erroneous, or wholly fictional outputs. These present notable ethical dilemmas, particularly when such technologies are employed for crucial decision-making or distributing information. To mitigate hallucinations and improve the reliability of LLMs, R&amp;D (research and development) efforts focus on refining training methodologies, incorporating diverse datasets, addressing biases, and enhancing the model’s ability to handle nuanced and contextually sensitive information. </P>
</Sect>
</Sect>

<Sect>
<H4>Intergovernmental and other Initiatives in Support of New Dimensions in Dynamic IE Ethics Innovation </H4>

<P>Floridi (2014:218) urges that any information society must be equipped with sustainable IE, and that this be made well known. It is here where intergovernmental organisations have a critical role to play, especially at a time when disruptive technologies could potentially lead to chaotic and unethical consequences. An example of the speed at which innovation takes place is OpenAI’s rapid development of Gen-AI in bringing out its chatbot. </P>

<P>UNESCO has made great strides in addressing frontier challenges in IE, the ethics of AI, and the ethics of neurotechnology. In addition, they are addressing issues on climate engineering and the IoT (internet of things), which can no longer be separated from IE and ethics of AI. Neuroethics is an emerging field. Combined with AI these techniques can enable developers – public or private – to abuse cognitive biases and trigger reactions and emotions without consent (Farisco, Salles, &amp; Evers 2018:718). Whereas neural networks in AI are concerned with the development and application of algorithms for pattern recognition, classification, regression, and other tasks, neuroscience is a multidisciplinary field of study that examines the ethical, legal, and social implications of neuroscience. Essentially, it encompasses the scientific study of the nervous system that regulates all human cognition, behaviour, and functions. The field encompasses a broad range of topics, from understanding how individual neurons function to exploring complex behaviours and cognitive processes. However, there is a link to AI because the more people discover how the nervous system works in relation to our world, the more there will be neuro-AI tangents (Berger &amp; Rossi 2022:2054) together with the encompassing ethical challenges. An example of neuroscience and AI is wearable devices that integrate technologies from both neuroscience and AI to monitor, analyse, or interact with the brain and its functions. These wearables often serve various purposes, including research, healthcare, and personal wellbeing. </P>

<P>Based on their ability to collaborate across barriers such as borders, language, and cultural differences, organisations with the value and importance such as UNESCO, the OECD, and others are critical in researching and guiding society and the world in the ethical deployment of AI technologies. UNESCO emphasises the importance of promoting inclusivity in AI deployment for enhanced respect of cultural diversity. For this they provide ethical guidelines to address issues such as transparency, accountability, fairness, and the impact of AI on human rights. They advocate better education and capacity building specifically in the fields of IE and ethics of AI. However, their foremost value lies in the opportunities created to foster international cooperation and dialogue on AI ethics. This involves collaboration with other international organisations, governments, academia, industry, and civil society to develop shared principles and norms. </P>

<P>In the 1970s and 1980s, the OECD encouraged member countries to develop guidelines and legislation to manage data responsibly (Wright, De Hert, &amp; Gutwirth 2011:119). In the seven-year stint between 1973 and 1980, one-third of the OECD’s 30 member countries enacted legislation intended to protect individuals against the abuse of data related to them and to give individuals the right of access to data with a view to checking their accuracy and appropriateness. The OECD is acutely aware of the tension that exists between the need for the free flow of information and the protection of personal data (Wright et al. 2011:120). The 2013 privacy framework by the OECD and later in 2023 the OECD’s Good practice principles for data ethics in the public sector (OECD 2023), offer guardrails to the ethical use of information. </P>

<Sect>
<H5>An Intergovernmental Organisation’s Role in Fair Information Practice </H5>

<P>As early as 1980, the OECD’s privacy guidelines outlined principles for the protection of privacy and personal data (cf. OECD 2002). These principles are not legally binding, but they have influenced the development of privacy laws and frameworks around the world. The key principles include putting limitations on the illegal collection of personal information, ensuring data quality. </P>

<P>These principles are widely considered foundational for the development of privacy laws and regulations. Various countries and regions have adapted and implemented these principles in their own privacy frameworks. It is essential to check the latest developments and specific regulations in one’s jurisdiction, as privacy laws can vary significantly. </P>
</Sect>

<Sect>
<H5>The OECD Fair Information Practice </H5>

<P>The OECD FIP principles are fundamental guidelines that underpin data protection and privacy policies worldwide. The principles were formulated by the OECD in the 1980s with the aim of addressing apprehensions regarding the collection, processing, and utilisation of personal information. The FIP principles include important elements such as restricting data collecting to essential and legal purposes, guaranteeing data accuracy, defining data security rules, and promoting a dedication to openness and transparency in data practices. In addition, they emphasise the significance of individual engagement, affording individuals the privilege to obtain and rectify their personal information. These principles have had a significant impact on many national and international laws and recommendations regarding data protection. They have provided the basis for modern privacy rules such as the GDPR (general data protection regulation) in the EU (European Union). The relevance of the OECD FIP principles persists in the present day, as they offer a structure for managing the progress of data processing technology while safeguarding individual privacy rights. </P>
</Sect>
</Sect>

<Sect>
<H4>Pointers for Responsible AI </H4>

<P>Table 4.3 below is based on an overview of the literature, from which specific deductions are made on essential elements that must be considered to ensure responsible AI use and development. </P>

<P>Table 4.3:Elements of responsible AI development and use </P>

<Table>
<TR>
<TD>Element </TD>

<TD>Use </TD>
</TR>

<TR>
<TD>Ethics frameworks and standards </TD>

<TD>Establish and comply with ethical principles that regulate the development and implementation of AI, guaranteeing that AI systems are created and utilised in a way that upholds human rights, dignity, and ethical norms. </TD>
</TR>

<TR>
<TD>Transparency and explainability </TD>

<TD>Guarantee that AI systems are clear and comprehensible in their operations and decision-making processes. Create AI models that are transparent and comprehensible to people, fostering increased trust and accountability. </TD>
</TR>

<TR>
<TD>Fairness and non-discrimination </TD>

<TD>Promote equity and impartiality by proactively addressing and minimising biases in AI systems. Conduct frequent audits and testing of AI algorithms to verify that they do not perpetuate or worsen discrimination based on race, gender, age, or other attributes. </TD>
</TR>

<TR>
<TD>Privacy and data protection </TD>

<TD>Ensure the implementation of strong data protection procedures to preserve personal and sensitive information. Ensure the preservation of user privacy and adhere to applicable data protection legislation. </TD>
</TR>

<TR>
<TD>Accountability and responsibility </TD>

<TD>Define explicit channels of responsibility for the decisions and acts of AI systems. Establish safeguards to provide recourse if AI systems inflict harm or function in unanticipated manners. </TD>
</TR>

<TR>
<TD>Safety and security </TD>

<TD>Emphasise the primacy of ensuring the physical and digital security of AI systems. Safeguard against the malevolent exploitation of AI technology and guarantee the robustness of systems against hacking and other cyber threats. </TD>
</TR>

<TR>
<TD>Sustainability and environmental impact </TD>

<TD>Evaluate the ecological ramifications of AI systems. Strive for the development and implementation of AI that is energy-efficient to reduce carbon emissions and support sustainability. </TD>
</TR>

<TR>
<TD>Inclusivity and accessibility </TD>

<TD>Ensure inclusivity and accessibility by designing AI systems that can easily be accessed and used by all individuals. Consider the varied requirements and capabilities of all possible users and strive to ensure that AI technology is advantageous and accessible to a broad range of individuals. </TD>
</TR>

<TR>
<TD>Element </TD>

<TD>Use </TD>
</TR>

<TR>
<TD>Collaboration and engagement </TD>

<TD>Interact with stakeholders, such as policymakers, industrial partners, academic researchers, and the public, to comprehend various viewpoints and tackle societal issues associated with AI. </TD>
</TR>

<TR>
<TD>Monitoring, improvement, regulation, and collaboration </TD>

<TD>Consistently assess and enhance AI systems to guarantee their adherence to ethical norms, legal obligations, and social values. Keep oneself updated on the latest developments in AI and adapt one’s practices accordingly. </TD>
</TR>

<TR>
<TD>Education, literacies, and awareness </TD>

<TD>All educational institutions have a responsibility to develop composite literacies and awareness around the nature of AI and the responsible use thereof. </TD>
</TR>
</Table>

<P>Following these guidelines helps to guarantee the responsible and ethical development and use of AI technology, with the aim of benefiting society while minimising any dangers and negative consequences. </P>
</Sect>

<Sect>
<H4>Conclusion </H4>

<P>To summarise, the field of AI ethics and its implementation in other areas like medicine and law, pose an intricate and dynamic challenge. The discussion about AI and the possibility of singularity, along with the implementation of fair information practices and responsible AI standards, highlight the need for a comprehensive approach to the development and regulation of AI. </P>

<P>We are currently at a critical point in time where the swift progress in AI technology, as demonstrated by systems such as GPT 4 and the expected development of models like GPT 5, necessitate a proactive and deliberate approach to ethical considerations. The OECD FIP standards serve as a fundamental framework for safeguarding data and ensuring privacy, which are of utmost importance in the era of AI. These principles, in conjunction with the guidelines for responsible AI, provide a clear path for the ethical, transparent, fair, and inclusive development of AI. </P>

<P>As we progress deeper into this era of technology, it is crucial to maintain a balance between innovation and accountability. AI development must be guided by ethical frameworks and norms to ensure that these technologies are utilised for the betterment of society while upholding human rights and dignity. Transparency, explainability, and accountability are not mere aspirations, but rather crucial prerequisites for fostering confidence and the approval of AI systems. To mitigate hazards connected with AI, it is necessary to address concerns of justice and nondiscrimination, prioritise privacy and data protection, and ensure safety and security. </P>

<P>Furthermore, the possible consequences of AI attaining or exceeding human cognitive capabilities – a central concept in the singularity discourse – emphasise the pressing need for these ethical deliberations. Continuous monitoring, stakeholder interaction, and adaptation is crucial in ensuring ethical practices in AI. The advancement of AI, with its potential benefits and difficulties, necessitate a cooperative endeavour involving technologists, ethicists, legislators, and the broader society. By engaging in such partnerships, we can guarantee that the development made in AI is in accordance with ethical values and has a good impact on the advancement of humanity. </P>

<P>Therefore, as we conclude this chapter, it is evident that the progression of AI ethics is continuous. The concepts and methods covered here are dynamic, continuously adapting to the technology they endeavour to regulate. The task at hand is not alone to responsibly advance AI, but also to consistently modify our ethical frameworks to align with the always evolving AI technology and its influence on society. </P>
</Sect>

<Sect>
<H4>References </H4>

<P>Alharbi, M. &amp; Mukhari, A. 2023. Information science and interdisciplinary: Literature review. International Journal of Advances in Science Engineering and Technology 9(2):38-42. </P>

<P>Ahmed, SR. 2020. Preventing identity crime: Identity theft and identity fraud: An identity crime model and legislative analysis with recommendations for preventing identity crime. Leiden: Brill. 
<Link>https://doi.org/10.1163/9789004395978 </Link>
</P>

<P>Asimov, I. (Ed.). 1984. Isaac Asimov presents the great SF stories 12 (1950). New York: DAW Books. </P>

<P>Bandi, A., Adapa, PVSR., &amp; Kuchi, YEVPK. 2023. The power of generative AI: A review of requirements, models, input-output formats, evaluation metrics, and challenges. Future Internet 15, 260. 60 pages.
<Link>https://doi.org/10.3390/fi15080260 </Link>
</P>

<P>Bartneck, C., Lütge, C., Wagner, A., &amp; Welsh, S. 2020. An introduction to ethics in robotics and AI
<Link>. Cham: Springer. https://doi. </Link>

<Link>org/10.1007/978-3-030-51110-4 </Link>
</P>

<P>Bates, MJ. 2005. Information and knowledge: An evolutionary framework for information science. Information Research 10(4). 239. 30 pages. </P>

<P>Benk, M., Tolmeijer, S., Von Wangenheim, F., &amp; Ferrario, A. 2022. The value of measuring trust in AI – a socio-technical system perspective. arXiv:2204.13480v1
<Link>. 12 pages. Available at: https:// </Link>

<Link>arxiv.org/pdf/2204.13480.pdf</Link>
. (Accessed on 23 January 2024). </P>

<P>Berger, SE. &amp; Rossi, F. 2022. Addressing neuroethics issues in practice: Lessons learnt by tech companies in AI ethics. Neuron 110(13):2052-2056. 
<Link>https://doi.org/10.1016/j.neuron.2022.05.006 </Link>
</P>

<P>Bester, C. &amp; Fischer, R. 2020. The essential relationship between information ethics and artificial intelligence. Artificial Intelligence, Ethics and Society
<Link> 29:1-11. https://doi.org/10.29173/irie428 </Link>
</P>

<P>Birrer, FAJ. 1999. Understanding values and biases in IT. ACM SIGCAS Computers and Society
<Link> 29(1):16-21. https://doi. </Link>

<Link>org/10.1145/382042.382047 </Link>
</P>

<P>Borko, H. 1968. Information science: What is it? Journal of the Association for Information Science and Technology 
<Link>19(1):3-5. https://doi. </Link>

<Link>org/10.1002/asi.5090190103 </Link>
</P>

<P>Burgess, JTF. &amp; Knox, EJM. (Eds.). 2019. Foundations of information ethics. Chicago: American Library Association. </P>

<P>Burrell, J. 2016. How the machine ‘thinks:’ Understanding opacity in machine learning algorithms. Big Data &amp; Society 3(1). 12 pages. 
<Link>https://doi.org/10.1177/2053951715622512 </Link>
</P>

<P>Bynum, TW. 2010. Philosophy in the information age. Metaphilosophy 
<Link>41(3):420-442. https://doi.org/10.1111/j.1467-9973.2010.01651.x </Link>
</P>

<P>Capurro, R., Eldred, M., &amp; Nagel, D. 2013. Digital whoness: Identity, privacy and freedom in the cyberworld
<Link>. Frankfurt: Ontos Verlag. https:// </Link>

<Link>doi.org/10.1515/9783110320428 </Link>
</P>

<P>Ess, C. 2008. Luciano Floridi’s philosophy of information and information ethics: Critical reflections and the state of the art. Ethics and Information Technology
<Link> 10(2-3):89-96. https://doi.org/10.1007/ </Link>

<Link>s10676-008-9172-8 </Link>
</P>

<P>Ess, C. 2014. Digital media ethics. Revised and updated 2nd ed. Cambridge: Polity. </P>

<P>Floridi, L. 1999. Information ethics: On the philosophical foundation of computer ethics. Ethics and Information Technology 1(1):33-52. 
<Link>https://doi.org/10.1023/A:1010018611096 </Link>
</P>

<P>Floridi. L. 2010. The Cambridge handbook of information and computer ethics. 
<Link>Cambridge: Cambridge University Press. https://doi.org/10.1017/ </Link>

<Link>CBO9780511845239 </Link>
</P>

<P>Floridi, L. 2013. The philosophy of information. Oxford: Oxford University Press. </P>

<P>Floridi, L. 2014. The fourth revolution: How the infosphere is reshaping human reality. Oxford: Oxford University Press. </P>

<P>Floridi, L. 2015. The ethics of information. Oxford: Oxford University Press. </P>

<P>Floridi, L. 2016. Mature information societies – a matter of expectations. Philosophy &amp; Technology
<Link> 29(1):1-4. https://doi.org/10.1007/ </Link>

<Link>s13347-016-0214-6 </Link>
</P>

<P>Floridi, L. 2018. Soft ethics and the governance of the digital. Philosophy &amp; Technology
<Link> 31(1):1-8. https://doi.org/10.1007/s13347-018-0303-9 </Link>
</P>

<P>Floridi, L. 2019. What the near future of artificial intelligence could be. Philosophy &amp; Technology
<Link> 32(1):1-15. https://doi.org/10.1007/ </Link>

<Link>s13347-019-00345-y </Link>
</P>

<P>Floridi, L. 2020. AI and its new winter: From myths to realities. Philosophy &amp; Technology 
<Link>33:1-3. https://doi.org/10.1007/s13347-020-00396-6 </Link>
</P>

<P>Floridi, L. 2024. On the future of content in the age of artificial intelligence: Some implications and directions. Philosophy &amp; Technology
<Link> 37. 112. 11 pages. https://doi.org/10.1007/s13347-024-00806-z </Link>
</P>

<P>Farisco, M., Salles, A., &amp; Evers, K. 2018. Neuroethics: A conceptual approach. Cambridge Quarterly of Healthcare Ethics 27(4):717-727. 
<Link>https://doi.org/10.1017/S0963180118000208 </Link>
</P>

<P>Friedman, B. &amp; Nissenbaum, H. 1996. Bias in computer systems. ACM Transactions on Information Systems (Tois)
<Link> 14(3):330-347. https:// </Link>

<Link>doi.org/10.1145/230538.230561 </Link>
</P>

<P>Froehlich, TJ. 2000. Intellectual freedom, ethical deliberation and codes of ethics. IFLA Journal
<Link> 26(4):264-272. https://doi. </Link>

<Link>org/10.1177/034003520002600405 </Link>
</P>

<P>Han, J. 2022. An information ethics framework based on ICT platforms. Information
<Link> 13. 440. 11 pages. https://doi.org/10.3390/ </Link>

<Link>info13090440 </Link>
</P>

<P>Ibiricu, B. &amp; Van der Made, ML. 2020. Ethics by design: A code of ethics for the digital age. Records Management Journal 30(3):395-414. 
<Link>https://doi.org/10.1108/RMJ-08-2019-0044 </Link>
</P>

<P>Mason, RO. 1986. Four ethical issues of the information age. MIS Quarterly 
<Link>10(1):5-12. https://doi.org/10.2307/248873 </Link>
</P>

<P>Moor, JH. 1985. What is computer ethics? Metaphilosophy 16(4):266-275. 
<Link>https://doi.org/10.1111/j.1467-9973.1985.tb00173.x </Link>
</P>

<P>Moor, JH. 2006. The nature, importance, and difficulty of machine ethics. IEEE Intelligent Systems 
<Link>21:18-21. https://doi.org/10.1109/ </Link>

<Link>MIS.2006.80 </Link>
</P>

<P>Moor, JH. 2020. What is computer ethics? In Miller, KW. &amp; Taddeo, M. (Eds.): The ethics of information technologies, 15-24. London: 
<Link>Routledge. https://doi.org/10.4324/9781003075011-1 </Link>
</P>

<P>Morandín-Ahuerma, F. 2023. Twenty-three Asilomar principles for artificial intelligence and the future of life. OSF Preprints 5-27. 
<Link>https://doi.org/10.31219/osf.io/dgnq8</Link>
. </P>

<P>Ng, MY., Kapur, S., Blizinsky, KD., &amp; Hernandez-Boussard, T. 2022. The AI life cycle: A holistic approach to creating ethical AI for health decisions. Nature Medicine 
<Link>28(11):2247-2249. https://doi. </Link>

<Link>org/10.1038/s41591-022-01993-y </Link>
</P>

<P>Niederman, F., &amp; Baker, EW. 2022. Ethics and AI issues: Old container with new wine? Information Systems Frontiers: A Journal of Research and Innovation
<Link> 25(1):9-28. https://doi.org/10.1007/s10796-022-10305-1 </Link>
</P>

<P>OECD (Organisation for Economic Cooperation and Development). 2002. OECD guidelines on the protection of privacy and transborder 
<Link>flows of personal data. Available at: https://www.oecd.org/en/ </Link>

<Link>publications/oecd-guidelines-on-the-protection-of-privacy-and-transborder-flows-of-personal-data_9789264196391-en. </Link>

<Link>html</Link>
. (Accessed on 23 March 2024). </P>

<P>OECD (Organisation for Economic Cooperation and Development). 2013. 
<Link>The OECD privacy framework. Available at: https://www.oecd. </Link>

<Link>org/sti/ieconomy/oecd_privacy_framework.pdf</Link>
. (Accessed on 30 November 2023). </P>

<P>OECD (Organisation for Economic Cooperation and Development). 2023. Good practice principles for data ethics in the public sector. 10 
<Link>August 2023. Available at: https://digital-skills-jobs.europa.eu/ </Link>

<Link>en/inspiration/resources/oecds-good-practice-principles-data-ethics-public-sector</Link>
. (Accessed on 23 March 2024). </P>

<P>Paass, G. &amp; Giesselbach, S. 2023. Foundation models for natural language processing: Pre-trained language models integrating media. Cham: 
<Link>Springer. https://doi.org/10.1007/978-3-031-23190-2 </Link>
</P>

<P>Quinn, MJ. 2011. Ethics for the information age. 4th ed. New York: Pearson. </P>

<P>Sartori, L. &amp; Theodorou, A. 2022. A sociotechnical perspective for the future of AI: Narratives, inequalities, and human control. Ethics and Information Technology
<Link> 24(4). 11 pages. https://doi. </Link>

<Link>org/10.1007/s10676-022-09624-3 </Link>
</P>

<P>Shanahan, M. 2015. The technological singularity. Cambridge: MIT Press. 
<Link>https://doi.org/10.7551/mitpress/10058.001.0001 </Link>
</P>

<P>Shannon, CE. 1948. A mathematical theory of communication. The Bell System Technical Journal
<Link> 27(3):379-423. https://doi. </Link>

<Link>org/10.1002/j.1538-7305.1948.tb01338.x </Link>
</P>

<P>Solove, DJ. 2006. A taxonomy of privacy. University of Pennsylvania Law Review
<Link> 154(3):477-560. https://doi.org/10.2307/40041279 </Link>
</P>

<P>Solove, D. 2015. The growing problems with the sectoral approach to privacy law. Privacy + Security Blog
<Link>. 13 November 2015. https:// </Link>

<Link>teachprivacy.com/problems-sectoral-approach-privacy-law</Link>
. (Accessed on 13 November 2024). </P>

<P>Stahl, BC. 2008. Discourses on information ethics: The claim to universality. Ethics and Information Technology 10:97-108. 
<Link>https://doi.org/10.1007/s10676-008-9171-9 </Link>
</P>

<P>Strickland, E. 2021. The turbulent past and uncertain future of AI: Is there a way out of AI’s boom-and-bust cycle? IEEE Spectrum 
<Link>58(10):26-31. https://doi.org/10.1109/MSPEC.2021.9563956 </Link>
</P>

<P>Tavani, HT. 1999. Informational privacy, data mining, and the internet. Ethics and Information Technology
<Link> 1(2):137-145. https://doi. </Link>

<Link>org/10.1023/A:1010063528863 </Link>
</P>

<P>Tsamados, A., Aggarwal, N., Cowls, J., Morley, J., Roberts, H., Taddeo, M., &amp; Floridi, L. 2021. The ethics of algorithms: Key problems and solutions. AI &amp; Society 
<Link>37:215-230. https://doi.org/10.1007/978-3-030-81907-1_8 </Link>
</P>

<P>Turing, AM. 1950. Computing machinery and intelligence. Mind 59:433-
<Link>460. https://doi.org/10.1093/mind/LIX.236.433 </Link>
</P>

<P>Ulam, S. 1958. Tribute to John von Neumann. Bulletin of the American Mathematical Society
<Link> 64(3):1-49. https://doi.org/10.1090/S0002-9904-1958-10189-5 </Link>
</P>

<P>Vaassen, B. 2021. AI, opacity, and personal autonomy. Philosophy &amp; Technology
<Link> 35. 88. 20 pages. https://doi.org/10.1007/s13347-022-00577-5 </Link>
</P>

<P>Wilson, TD. 1997. Information behaviour: An interdisciplinary perspective. Information Processing &amp; Management
<Link> 33(4):551-572. https://doi. </Link>

<Link>org/10.1016/S0306-4573(97)00028-9 </Link>
</P>

<P>Weidinger, L., Mellor, J., Rauh, M., Griffin, C., Uesato, J., Huang, P-S., Cheng, M., Glaese, M., Balle, B., Kasirzadeh, A., Kenton, Z., Brown, S., Hawkins, W., Stepleton, T., Biles, C., Birhane, A., Haas, J., Rimell, L., Hendricks, LA., Isaac, W., Legassick, S., Irving, G., &amp; Gabriel, I. 2021. Ethical and social risks of harm from language models. arXiv:2112.04359v1
<Link>. 64 pages. https://doi.org/10.48550/ </Link>

<Link>arXiv.2112.04359 </Link>
</P>

<P>Woodward, B., Imboden, T., &amp; Martin, NL. 2011. An undergraduate information security program: More than a curriculum. Journal of Information Systems Education 24(1):63-70. </P>

<P>Wright, D., De Hert, P., &amp; Gutwirth, S. 2011. Are the OECD guidelines at 30 showing their age? Communications of the ACM 54(2):119-127. 
<Link>https://doi.org/10.1145/1897816.1897848 </Link>
</P>

<P>Young, J., Smith, TJ., &amp; Zheng, SH. 2020. Call me BIG PAPA: An extension of Mason’s information ethics framework to big data. Journal of the Midwest Association for Information Systems (JMWAIS) 2:17-42. </P>

<P>Zhou, J., Chen, F., Berry, A., Reed, M., Zhang, S., &amp; Savage, S. 2020. A survey on ethical principles of AI and implementations. IEEE Symposium Series on Computational Intelligence (SSCI), 
<Link>Canberra, ACT, Australia, 3010-3017. https://doi.org/10.1109/ </Link>

<Link>SSCI47803.2020.9308437 </Link>
</P>
</Sect>
</Sect>
</Sect>

<Sect>
<H2 id="LinkTarget_10758">Chapter 5 </H2>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_41.jpg"/>
</Figure>
</Link>

<P>Unpacking the Role of Big Data, Artificial </P>

<Sect>
<H3>Intelligence, and Predictive Analytics in Education: Implications for Educators and Research Ethics Review Committees </H3>

<P>Nicky Tjano  </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_42.jpg"/>
</Figure>
</Link>

<Link>
<P>Director of Teaching &amp; Learning Strategy, Projects, and Portfolio Performance University of South Africa Pretoria, South Africa </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_43.jpg"/>
</Figure>
</Link>

<Sect>
<H4>Introduction </H4>

<P>Goodbye to ChatGPT (chat generative pre-trained transformer), hello to AI (artificial intelligence) on the moon! AI is daring to have its finger touching the surface of the moon. The CMCSS (Canadian Mission Control Space Services) through budgetary funding of $3.04 million by the Canadian Space Agency made history when it launched the Rashid Rover on 11 December 2023, with the aim of spending one lunar day1 in space. The mission will see the Rover capturing and identifying geological features through pictures, and it was motivated by CMCSS’ urge to be the pioneer in showcasing AI’s DL (deep learning) capabilities first in lunar space. DL is a subset of ML (machine learning) and it relies on large and vast volumes of data, based on complex algorithms to train the model (Rane, Kaya, Mallick, &amp; Rane 2024:218). </P>

<P>The enormous datasets, whether structured or unstructured, are where big data and PA (predictive analytics) traverse. Using data points and the precision of analysis are just a drop in an ocean in terms of capabilities and insights that can </P>

<P>One lunar day is equivalent to 29 Earth days. </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_44.jpg"/>
</Figure>
</Link>

<P>be drawn from this interaction. This gigantic volume of data is known as ‘big data,’ a term which, according to O’Leary (2013:96), has experienced exponential growth and has to date accumulated many definitions. Goh, Cheong, Tan, and Sharma (2023:16) allude that voluminous amounts of data that are being produced across various platforms, flourish exponentially. The authors go further to predict that this growth is expected to double every two years, and with such massive datasets, human-geared analysis is almost impractical, hence the need for sophisticated computational analysis (Emanuel &amp; Wachter 2019:2281). To demonstrate the surge in data on the internet, Solarwinds Pingdom (2013) breaks down various types of datasets globally available on the web. For example, in 2012, there were approximately 2.2 billion e-mail users, 144 billion e-mail traffic per day, half a billion websites, 2.4 billion internet users, and over 100 million blogs. This constitutes data in unstructured form such as pictures, audio, text, and videos. As of 2023, there were over 5.3 billion internet users translating into almost 67% of the global population. In terms of the social media, there were about 4.95 billion users coupled with over 600 million blogs (Petrosyan 2023). These data statistics, according to Chu, Kim, Lin, Yu, Bradski, Ng, &amp; Olukotun (2006:1 of 8) reflect data evolution through the ‘internet of people and things’ and the ‘internet of everything.’ </P>

<P>The introduction of personal computers almost five decades ago (Flamm 1988:6), the prevalence of supercomputers and their interaction with human beings (Chen, Chen, &amp; Lin 2020:75265), as well as the use of mobile technology (Alexandru, Alexandru, Coardos, &amp; Tudora 2016:123) and social media have enabled the generation and availability of data in excessive forms (Luan, Geczy, Lai, Gobert, Yang, Ogata, Baltes, Guerra, Li, &amp; Tsai 2020:2 of 11), which consequently paved way, and to some extent enabled these developments. According to Grand View Research (2023), the proliferating utilisation of mobile devices and their applications have transmogrified modern life in various ways, and this has caused a paradigm shift away from brick-and-mortar operations to digital and recently to cloud-based operations. Similarly, still on digital and technological transformation, Nam and Pardo (2011:185) highlight a number of evolutions in applications, ML techniques, DM (data mining), ICT (information and communication technology), as well as data fusion and pattern-recognition techniques as instrumental to the industrial revolution, with smart cities of the future being one part of it. </P>

<P>Big data as O’Leary (2013:97) puts it, were originally brought to the fore by Cox and Ellsworth in their 1997 work which reflected on how big data can be managed in the era of scientific visualisation (Cox &amp; Ellsworth 1997). In that article, the two authors use the term ‘large data.’ Big data refers to information that is generated rapidly and in a wide variety from digital sources (Venugopal &amp; Mamatha 2023:1). Conducting a literature review, an analysis of statistical data, mathematical, and statistical problem solving, improved personalised learning is purporting benefits driven by technological advancements in AI, big data, ML, and PA (Venugopal &amp; Mamatha 2023:1). PA and AI rely on massive data warehouses that store both historical and real-time information. The connection between AI and big data can be best explained through a process of symbiosis, in that they are both dependent on one another to deliver above-par insights, where data analytic capabilities become an enabler in the journey for many industries to leverage the gains. For example, it is reported that the fusion of big data and ML tools and techniques such as DL models – CNNs (convolution neural networks) and LSTM (long-short term memory) architectures </P>

<P>– enable abilities of metropolitan municipal departments to </P>

<P>realise the emergence of smart cities, where they are able </P>

<P>to predict region-based traffic flow (Khan, Nazir, García-Magariño, &amp; Hussain 2021). In these models CNNs are applied to classify spatial data whereas LSTM is used for temporal data classification (Khan et al. 2021:1-4 of 11). Other industrial areas benefiting from the application of AI, analytics, and big data include but are not limited to predictions of crowd flow (Zhou, Gu, Ling, Li, Zhuang, &amp; Wang 2020:338), traffic flow (Khan, Ali, Ullah, &amp; Bulbul 2018:71; Khan et al. 2021:1-2 of 11), water quality (Assem, Ghariba, Makrai, Johnston, Gill, &amp; Pilla 2017:317), and air quality (Xiong, He, Huang, Yu, &amp; Jing 2020:78). Neha and </P>

<P>Sidiq (2020:48) allude that within the educational landscape, IHEs (institutions of higher education) are able to predict </P>

<P>students’ performances and success through additional ML techniques such as CF (cooperative filtering) and recommender systems (RS). DM, regression techniques, classifications, neural networks, and SVMs (support vector machines) are some of the terminologies associated with ML and big data (Neha &amp; Sidiq </P>

<P>2020:49). </P>

<P>No exploration of AI in education would be complete without addressing the challenges that arise in tandem with the integration of big data and PA. Algorithmic bias, ethical concerns, and the potential erosion of privacy are formidable issues that demand attention – hence they are discussed in the chapter. The chapter not only identifies these challenges, but also seeks to highlight possible measures and principles aimed at mitigating these challenges and the concomitant risks that accompany the implementation and integration of AI, big data, and PA in education. Peering into the future, the concluding section of the chapter ventures into the implications that big data and AI hold for the work and responsibilities of RERCs (research ethics review committees). The key questions worth exploring include the following: </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>How will the advancements in big data, AI, and PA reshape the ethical considerations governing educational research and the functions of RERCs? </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>What novel challenges and opportunities will emerge as we tread further into this technological frontier? </LBody>
</LI>
</L>

<P>These questions propel us towards an informed reflection on the evolving landscape of research ethics, underscoring the need for adaptable frameworks in the face of accelerating technological progress. This chapter is an exploration and exciting journey beaming through the past, present, and future of AI, big data, and PA integration in education. The threads that weave these technologies into the fabric and the nature of the educational sector are therefore unravelled, contemplating their impact on educators, research ethics review committees, and the very foundations of educational practices. The next section sets the scene for the chapter by delving into the classifications of AI systems. </P>
</Sect>

<Sect>
<H4>Classifications of AI Systems </H4>

<P>In this era of AI, where its capabilities and prowess are taking the world by storm, ML remains one of the topical issues within academic discussions and conferences (Sen, Hajra, &amp; Ghosh 2020:99). The enhancement, efficiency, and precision of education practices can be attributed to how AI alongside data science and ML algorithms are leveraged. While data science, AI, and ML are closely connected to one another and can be clustered in the same domain, their overlap, specific applications, and meanings are worth being explored. Signifying the importance of ML techniques and their convergence with AI, Salem (2015:201) echoes sentiments in the literature that ML and computational intelligence coupled with DM techniques amplify the wherewithal of e-learning systems, thereby enabling them to reveal even more intelligent behaviour beneficial to the educational landscape. </P>

<P>The popularity of ML systems and techniques is credited to the work of Alan Turing, a British scientist who through his seminal work on computing, machines, and intelligence asked what could at the time be a rhetorical question: ‘Can a machine think?’ (Turing 1950). In his work, Turing hypothesises that it is possible that machines have the ability to think and execute decisions just as humans can. It goes without saying that he was mocked, however, little did they know that almost eight decades later, his thoughts would be poised to be the turning point for the AI of the future. Furthermore, his thoughts would continue to be the centre of debate between machines and human abilities. Muhammad and Yan (2015:946) point at the supernatural abilities of ML as computers are guided how to automatically explore good predictor drawing from previous experience. The authors go on to acclaim that abilities like that depend on how good the classifier is. Machines, referring to computers are extrapolated to be lacking abilities to learn from past experiences, while humans have that attribute (Muhammad &amp; Yan 2015:947). This is an interesting reflection point to witness how far AI and in particular ML has evolved! </P>

<P>ML is built on the principle of incessant learning algorithms that continuously engage in decision-making or the identification </P>

<P>of patterns. ML models are trained to utilise preexisting data (this is one of the commonly used forms) or ML is a subset of AI empowered by neural network technology, and it is within the domain of data science. For example, the ChatGPT model by OpenAI is empowered by a transformer architecture that is used to generate text. This is the key strength and capability of generative AI (Gen-AI), and models like these are trained on enormous, large data, mainly testing the equivalent of over eight million documents in excess of 10 billion word characters (ATRIA 2023). </P>

<P>There are three main types of ML, namely supervised, unsupervised, and reinforcement learning. Big data, DM, and training are at the core of ML tools’ efficiency (Manne &amp; Kantheti 2021:79), as all of these are using ML algorithms. Figure 5.1 below provides a schematic presentation of various classifications of AI systems. In comprehending the ML capabilities, Muhammad and Yan (2015:946) discern between supervised learning and unsupervised learning and explain that the former demonstrates the ability of machines to learn in a trained form. Supervised learning is one of the broad categories of ML, as it refers to algorithms and models that make predictions about future outcomes (Sen et al. 2020:100). These models draw their strengths and efficacy from the training they have been subjected to, which is based on previous data – hence they are referred to as ‘supervised’ models. </P>

<P>Unsupervised learning models on the other hand, are those models that are able to use machine intelligence to identify patterns and categories in the dataset. What sets them apart from supervised learning models is that they rely on unlabelled data and can independently organise that data to discover insights (Barlow 1989:295; Hastie, Tibshirani, &amp; Friedman 2009:485-486). </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_45.jpg"/>

<Caption>
<P>Figure 5.1: Classification of AI systems. (Source: Kumar 2018) </P>
</Caption>
</Figure>

<P>Therefore, the distinguishing feature between supervised and unsupervised learning is the ability to act independently and is underpinned by data preparation and data preprocessing (Kotsiantis, Zaharakis, &amp; Pintelas 2007:249). Lastly, reinforcement learning models are those models that act as intelligent agents and are aimed at taking actions and decisions within a well-defined environment. Each instance of datasets in ML is characterised by similar features, which could be continuous, categorical, or binary (Muhammad &amp; Yan 2015:947). This is where ML and DL intersect and play an influential role. The two concepts are both characterised by a spectrum of approaches such as supervised, unsupervised, and reinforcement learning, neural networks, decision trees, and other forms of learning techniques (Samoili, López Cobo, Gómez, De Prato, Martinez-Plumed, &amp; Delipetrev 2020:4). </P>

<P>NLP (natural language processing) applications and LLMs (large language models) are primarily conversational AI technologies, also known as dialogue or chatbot AI, focused on creating intelligent systems that can engage in natural language conversations with humans. These systems are designed to understand and generate humanlike responses to text or speech inputs. Conversational AI models employ techniques such as NLU (natural language understanding) to comprehend the user’s input, dialogue management to maintain context and flow within the conversation, and NLG (natural language generation) to generate appropriate and coherent responses. Their goal is to create interactive and engaging conversational experiences, whether in text-based chatbots, voice assistants (speech recognition), or other conversational interfaces. The other AI capabilities like expert systems, robotics, planning, and machine vision fall outside the scope of this chapter – however, they are worth exploring. </P>

<Sect>
<H5>AI, Big Data, and Predictive Analytics Applications and Capabilities </H5>

<P>The decussation of big data and AI is driven by their quest and ability to analyse vast datasets, recognise intricate patterns through classification techniques, and generate insights that are practically beyond the grasp of traditional human-geared abilities. The efficacy of big data and PA depends on the integration of a variety of AI technology tools and ML techniques, such as DM, social network analysis, statistical analysis, text analytics, data visualisation, and signal processing (Sivarajah, Kamal, Irani, &amp; Weerakkody 2017:265). Chen and Zhang (2014:314) share similar sentiments on big data capabilities and suggest that they rely on the ability to conduct social network analysis. Such level of prowess needs access to data, a voluminous one for that matter, as well as ML algorithms. ML is fortified by DL, which is a subset of ML, relying on using large and gigantic volumes of data through complex algorithms to train the model. The enormous datasets, whether structured or unstructured is where big data and data analytics traverse (Janiesch, Zschech, &amp; Heinrich 2021:685). It is from this perspective that PA thrives. </P>

<P>The interrelatedness between AI and associated techniques such as ML algorithms, NLPs, DL, and computer vision capabilities and techniques is depicted in Figure 5.2 below. Researchers are increasingly leveraging on these tools and techniques to accelerate their work (Ekman 2021:25). ChatGPT, Google Bard, and Microsoft Co-Pilot are some of the popular Gen-AI tools that can be clustered within the domain of NLP applications. </P>

<P>They are basically meant for language translation, summarising text, content evaluation, and language generation, all of which play a transformative role of improving efficiency, enhancing communication, task automation, and gathering insights. </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_46.jpg"/>

<Caption>
<P>Figure 5.2: Interrelatedness between AI techniques and their </P>
</Caption>
</Figure>

<P>selected application in the industry. (Source: Takyar </P>

<P>2022). </P>

<P>This is where Gen-AI and conversational AI through the use of chatbots come into play. Second, automation, fraud detection, data analysis, and predictive analytics are other uses of AI, including its related techniques and algorithms. With automation, monotonous tasks are by nature programmed to improve efficiency and productivity. This function is dependent on AI models, as well as ML and DL algorithms and techniques to use historical data to analyse voluminous amounts of data, to predict and make forecasts for planning and resource optimisation. The engendered output then drives informed decision-making, thus sparking data-driven insights. </P>

<P>The value in the decisions that are made is likely to be manifested in sustainable competitive and superior advantage (Coyne 1986:54). For fraud detection, AI algorithms have an amassed ability to detect the prevalence of any anomalies during transactions, and where necessary point out the existence of patterns. This will enable the provision and detection of early warnings for triggering risk mitigations and the prevention of fraud. The last two uses include quality enhancement and control as well as e-commerce for marketing and the creation of advertisements. With the former, AI systems can be used for the automation of quality control processes, for example, in the automotive industry for ensuring product consistency and reducing defects during the car manufacturing process. Mishra (2019:1025) states that AI systems and big data can be used to improve quality assurance in the administration of quality processes as well as teaching and learning. Likewise, Chassignol, Khoroshavin, Klimova, and Bilyatdinova (2018:17) corroborated by Chen et al. (2020:75269) affirm their use towards improving performance and efficiency through the automation of administrative tasks and processes. </P>

<P>The precipitous technological evolution of big data and AI has impacted modern life and humanity, from transforming economies, business, workplace, science, and politics to education (Grand View Research 2023; Luan, Geczy, Lai, Gobert, Yang, Ogata, Baltes, Guerra, Li, &amp; Tsai 2020:1-2 of 11). The common applications of AI, big data, and predictive analytics highlight their versatility and impact across various industries. Some of these advancements have enabled improved service delivery and enhanced customer experiences, leveraging data-driven decision-making, personalised experiences, systems and process automation, and operational efficiencies (Takyar 2022). </P>

<P>The source of big data as identified by Kanika and Khan (2017:193), among others, include wearable and sensor devices, social media sites, and medical data. With each click, as of late, IHEs are also leveraging opportunities brought by AI and big data, for example, ensuring smooth teaching and learning (Khan &amp; Vivek 2022:9), undertaking education research, enhancing assessments, and individualised learning experiences and policymaking (Luan et al. 2020:1 of 11), automated audit management and predictive analysis, and modelling student behaviour (Hjuler 2019:2-7). </P>
</Sect>

<Sect>
<H5>Characteristics of Big Data </H5>

<P>Big data characteristics are displayed in the transformative power of ML and DL algorithms. Drawing from the definition of AI by HLEG (the high-level expert group on artificial intelligence), it is undeniable that data and analysis anchor the AI capabilities across different disciplines, from psychology, political science, and education to the medical industry (Luan et al. 2020:2 of 11). The increasing use of social media and smart mobile devices has powered the generation and availability of capacious amounts of data across various platforms (Alexandru et al. 2016:123). At the core of big data definitions lie three key traits, namely volume, variety, and velocity, denoted as the 3Vs. The first V refers to the voluminous amount of data generated from various sources; the second V relates to the multiplicity of sources from which these data are generated; while the last V relates to the speed with which these data are generated or become available (O’Leary 2013:96). </P>

<P>As research on big data continues, new traits are added. For example, Amit Sheth in a 2014 conference paper on how big data can be transformed into smart data cited the fourth characteristic, that is veracity (Sheth 2014:2), thus changing the denotation to four Vs. In the same view, Patgiri and Ahmed worked in 2016 on the Vs of the game changer in big data as the authors confidently affirmed that as big data continue to disrupt and blossom across industries across the world, this domination will endure until 2030 (Patgiri &amp; Ahmed 2016:17). Fast forward, the literature on big data is fairly demonstrating what characterises the field has evolved, with more Vs added. Khan et al. (2021:2 of 11) mention five Vs, with the addition of value referring to what insights the generated data can produce. Two additional characteristics </P>

<P>– variability and visualisation – as identified by Alexandru et al. (2016:124) complete the characteristics of big data as illustrated in Figure 5.2. Variability refers to data changes, while visualisation is an extent of data readability. The IoT (internet of things) has enabled developments in big data and generate both structured and unstructured data (Solarwinds Pingdom 2012). Likewise, Wang and Preininger (2019:18) point out that most of the data being generated, especially in the medical sector remain unstructured. Due to unclear, ear-splitting, and incomplete data, a comprehension of the variability of diseases in patients can be a complicated process. </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_47.jpg"/>

<Caption>
<P>Figure 5.3: The characteristics of big data. (Source: Alexandru et al. 2016:124) </P>
</Caption>
</Figure>

<P>A slow adoption of technological advancements and applications in various sectors is another drawback (Manne &amp; Kantheti 2021:87). Nonetheless, the promise of AI in these sectors is not as bleak as it is imagined. The key capability of AI in big data is automation and the enhancement accompanied by the data analysis process. This impact consequently fuels capabilities for PA to be more accurate and scalable. On scalability, O’Leary (2013:98) calls for AI that will be ascendable to the cluster of machines. </P>
</Sect>
</Sect>

<Sect>
<H4>Advancements in Big Data, AI, and Predictive Analytics for Education </H4>

<P>According to Cheng, Chen, Sun, Zhang, and Tao (2018), the developments and rise in numbers of technological tools propelled by AI have transformed various industries. At the heart of big data and AI capabilities lies the power of algorithms, which are also evident through the field of data science and PA. These sentiments are also shared by Goh et al. (2023:2) in a consultation paper prepared by a big data and AI review group on behalf of the bioethics advisory committee of Singapore. The authors put forward that developments in and the increasing use of big data, AT (auto tech) tools, and techniques enable researchers to merge and analyse large amounts of data, from which they are able to identify patterns, trends, and correlations. It is from these analyses that insights and data-driven decisions are drawn. Just as the other sectors or industries are reaping the benefits of AI and big data advancements, so is the education sector also witnessing a variety of benefits. These benefits mean that IHEs are able to use PLA (predictive learning analytics) to track students’ learning progress, to enhance curriculum planning and development, to measure the efficacy of teaching strategies, and trigger data-driven decision-making and support for an enhanced teaching and learning environment (Zawacki-Richter, Marín, Bond, &amp; Gouverneur 2019:3-4). Additionally, educators are efficient and effective in how they deliver content (Cheng et al. 2018). </P>

<P>The integration of AI, ML, and data analytics in education offers many benefits. Johnson (2023) adds that personalised learning strategies such as enabling early and tailored interventions in learning for struggling or at-risk students and promoting an adaptive learning environment to adjust course content to match the teaching styles are possible. The other benefits include stimulating a dynamic learning environment through which students’ answers can be assessed with the necessary effective teaching pedagogies (Lin, Huang, &amp; Lu 2023). Naveenkumar (2023) highlights that the ability to adapt the learning process according to the needs and preferences of students makes it possible to provide personalised feedback. Goh et al. (2023:21) succinctly put some of these benefits as follows: </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Better prediction abilities and diagnostic functionalities; </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>quality improvements and efficiency; and </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>advancements in personalised experience. </LBody>
</LI>
</L>

<P>These advantages affirm the potential power of big data and AIML (artificial intelligence and machine learning). It is therefore valid that the why and how questions of big data and AI’s applicability in education are explored, with a detailed analysis being provided in the next four subsections. </P>

<Sect>
<H5>Algorithms and AI in Education </H5>

<P>Salem (2015:201) opines that ML algorithms coupled with data mining techniques, computational intelligence, and electronic learning systems, boost additional computing capabilities to display more intelligent behaviour. The application of big data and AI algorithms and systems in education is gradually gaining momentum. Hence, integrating ICT into the classroom has changed the focus of education from the educator to the student, making it possible for students to study whenever and wherever they want, around the clock. AI provides the most efficient means of communication with students and helps businesses to learn about their many talents. Niklas Hjuler in his 2019 work focusing on algorithms and AI in education presents a variety of specialised uses of big data and AI technologies (Hjuler 2019). For example, in a natural language community, neural network models, writing style development, authorship verification, and ghostwriter detection are some of many uses of big data and AI systems as explained below. </P>

<P>On writing style development, Stephan Sloth Lorenzen along with his two colleagues Hjuler and Stephen Alstrup, conducted a large-scale examination of writing-style development among more than 10,000 Danish high school learners in 2019 (Lorenzen, Hjuler, &amp; Alstrup 2019). In that work, they have analysed over 100,000 essays through a Siamese neural network² to determine the existence of similarities between two sets of texts. They were able to construct a writing style development profile for </P>

<P>It is often referred to as a twin neural network which is a form of an artificial neural network popularly used in biometric identity verification technologies and systems such as handwritten text recognition, fingerprints detection, face detection, and signature verification (cf. also Taigman, Yang, Ranzato, &amp; Wolf 2014; Bromley, Guyon, LeCun, Säckinger, &amp; Shah 1993; Chopra, Hadsell, &amp; LeCun 2005). </P>

<P>the learners in order to distinguish between global development trends and patterns among the learners and identify at-risk learners that may need support and guidance. The study reveals general development trends among the learners, suggesting that their writing styles deviate as they advance through their grades, thus portraying huge differences between when they started and when they left school. In appreciation of the power and abilities of the Siamese neural network, Chicco (2021:73-74) advocates that when confronted with the need to compare more complex data samples, consisting of features with different dimensionalities and types, compression is needed before processing. Writing style is important for educators to understand behavioural patterns of students, as Lorenzen et al. (2019:573) advise that changes in the writing style may be indicative of students attempting to cheat. This insight is vital because such ML and PA detective abilities may not have been possible through a human-driven exercise. </P>

<P>Relating to writing style development detecting abilities, the other feature of DL abilities is that of authorship verification. In the era of AI-powered text-generation bots using NLP, plagiarism and essay mills are on the rise. These potentially compromise academic integrity and ethical principles expected of the authors as explained in the chapter on the AI and academic integrity discussion. Work in the area of authorship verification includes Qian, He, and Zhang (2017), Stamatatos (2009), as well as Stavngaard, Sørensen, Lorenzen, Hjuler, and Alstrup (2019), to name a few. The methods and techniques usually used to study authorship verification involve learning a similarity measure for writing style through examination of examples from different sets of authors. With advancements in AI technologies almost on a daily basis, chances of advancement in plagiarism acts are also increasing. That said, it would be ideal that future developments in the authorship verification space investigate predictions and changes in writing styles to make provision for early alert systems to identify at-risk or cheating students (Lorenzen et al. 2019:573). </P>

<P>With the increase in AI and big data usage in education, a number of articles have examined the rise in mechanisms aimed at detecting ghostwriters in academic writing. The work by Ali and Alhassan (2021) brings to light the increasing concerns around ghostwriting and contract cheating in HE (higher education). In response to these concerns, the literature on ghostwriting and academic writing and integrity suggests that AI and big data techniques can be applied to determine instances where such acts have occurred and proposes guidance and solutions on how such practices can be prevented. Draxler, Werner, Lehmann, Hoppe, Schmidt, Buschek, &amp; Welsch (2023) apply a Siamese neural network to examine the effect of AI-geared ghostwriting by comparing the similarity of texts and constructing a specific profile for students based on writing style development. In these instances, the co-conspirators would normally and deliberately decide not to declare authorship and ownership of AI-generated text. Still on ghostwriting, Zimmerman provides a discourse analysis on the possible prevalence of bias in NLP tools such as ChatGPT in generating pre-print scientific articles and co-write editorials (Zimmerman 2023). To guard against such acts and practices, the author proposes using tools such as DetectGPT while 
<Link>Originality.ai</Link>
 can be used to detect AI-written content. Similarly, Turnitin-AI detection software has also been introduced and all in all these measures are aimed at ensuring the fair and transparent use of AI-text generation tools and chatbots. The challenges with these tools are documented in the literature and some of them include the inability to keep up with the speed with which AI-text generation tools are advancing and being developed. Second, given the ability of these AI systems to imitate the human element, it is still difficult to distinguish between human-generated and ML-generated text (De Vries 2022:93). Lastly, Heikkilä (2023) supported by Kirchner, Ahmad, Aaronson, and Leike (2023) put forward that AI-text detection software and systems are still battling the high rates of false negatives and false positives, thereby underscoring the complexity that comes with effective AI-text detection tools and the dire need for investment in enduring research and development efforts. </P>
</Sect>

<Sect>
<H5>Predictive Analytics </H5>

<P>Gen-AI and NLP models usually make use of historic data to make predictions and forecasts, aiding industries. Education is no exception as Choi and McClenen (2020:1-2) are of the opinion that through powerful DM and AI capabilities, learning analytics and performance predictions can be executed at ease. Within the context of education, the ability to predict the performance of students remains topical in the era of AI and big data (Bhushan, Shingate, Vyas, Naman, &amp; Shirsath 2023:1-2 of 7). The success of students is an issue of paramount importance for a myriad of stakeholders – the government for funding purposes and assessing value for money, the institution for gauging teaching strategies and efforts of teaching staff, and parents for personal development and progress of their children. </P>

<P>Hlosta, Herodotou, Papathoma, Gillespie, and Bergamin (2022:1 of 12), examining PLA in online education, have revealed that a number of unforeseen events – such as new family and work commitments, health challenges, and technology infrastructure accessibility issues – can take place during the course of students’ studies, affecting their behaviour and thus their performance. Their study therefore recommends new data sources that may possibly be integrated into predictions to abate the impact of some of these events, and these include data on financial assistance. Such data when thoroughly integrated and properly analysed can lead to insights that can highlight the importance of complementing AI-based systems with human intelligence. The information about the academic performance of students is therefore necessary to devise mechanisms aimed at improving the learning outcomes and also reducing dropout among students (Neha &amp; Sidiq 2020:48; Zawacki-Richter et al. 2019:3). This information can particularly be useful to both students and educators to leverage the pros of technology-enhanced learning. This approach goes beyond what a good AI system and DM can offer. </P>

<P>The selected associated DM techniques and their uses among others include classification technology, as illustrated in Table 5.1. To realise these benefits, automation of processes, ML, and AI technologies and techniques such as expert systems, DL, ANNs (artificial neural networks), and CNNs are crucial to being able to predict student success. The result of predictions will help to anticipate students’ actions and behaviour, improving the instructional style of educators, and devising necessary support measures to aid learning. When classifying learning behaviours, unsupervised learning methodologies and clustering algorithms tend to be applied in the absence of true and reliable data to train the model (Lin et al. 2023:5 of 22). Muhammad and Yan (2015:946) define classification as ‘the process of using a model to predict unknown values (output variables), using a number of known values (input variables).’ PA relies on DM and SVMs to classify and regress massive amounts of data to be able to predict or make forecasts. This capability is one of the strengths of ML algorithms and it can best be applied in text recognition, which is within the ambit of Gen-AI (Neha &amp; Sidiq 2020). </P>

<Table>
<Caption>
<P>Table 5.1: Application, uses, and classification accuracy of selected ML DM techniques </P>
</Caption>

<TR>
<TH>ML DM Techniques </TH>

<TH>Application and Uses </TH>

<TH>Classification Accuracy Range </TH>
</TR>

<TR>
<TD>Decision trees </TD>

<TD>Predict students’ enrolment and identify dropout cases (Baradwaj &amp; Pal 2012). </TD>

<TD>Up to 98.90% </TD>
</TR>

<TR>
<TD>Feature selection algorithms </TD>

<TD>Predict students’ failures (Jantawan &amp; Tsai 2013:2;Karabulut, Özel, &amp; Ibrikci 2012:323). </TD>

<TD>Up to 99.30% </TD>
</TR>

<TR>
<TD>Bayesian and ensemble methods, as well as multilayer perceptions and sequential minimal optimisation techniques </TD>

<TD>Predict postgraduate students’ employability chances (Mishra, Kumar, &amp; Gupta 2016:2275). </TD>

<TD>Up to 97.45% </TD>
</TR>

<TR>
<TD>Neuro-fuzzy techniques </TD>

<TD>Pace and assess mathematics students in early stages of their academic year (bin Mat &amp; Buniyamin 2017:685). </TD>

<TD>90.03% </TD>
</TR>
</Table>

<Table>
<TR>
<TH>ML DM Techniques </TH>

<TH>Application and Uses </TH>

<TH>Classification Accuracy Range </TH>
</TR>

<TR>
<TD>Clustering techniques </TD>

<TD>Analyse students’ causes of failure, utilising association rule mining to analyse students’ performances (Deperlioglu &amp; Birtil 2016:505; Kumar 2016:24). </TD>

<TD>99.81% </TD>
</TR>

<TR>
<TD>CNNs </TD>

<TD>Extract features from images usually used for handwriting recognition (Lin, Kuo, &amp; Chiang 2021; Remaida, Moumen, El Bouzekri El Idrissi, &amp; Abdellaoui 2021:71) and to assess students’ emotional understanding (Hung 2021:715; Sharma &amp; Mansotra 2019:4692; Zhang, Jiang, Zhang, Wang, Zhao, &amp; Wang 2022:2) </TD>

<TD>99-100% </TD>
</TR>

<TR>
<TD>SVMs </TD>

<TD>Research for the classification of educational resources and regression analysis from biology, financial analysis, the medical industry, etc. (Ma &amp; Guo 2014:2; Murty &amp; Raghava 2016:41; Quan &amp; Pu 2023:8097) </TD>

<TD>93.90% </TD>
</TR>
</Table>

<P>The added benefit of most of these techniques and methods is that their level of accuracy ranges between 93 and 100%. Muhammad and Yan (2015:947) acknowledge that the number of methods and techniques aimed at measuring prediction accuracy is large and varies depending on how the training data set is split. Cross validation and rotation estimation methods are but a few examples of those measurements. In their study, they measure accuracy as a function of the number of correct classifications divided by the total number of test cases. The lowest performance on accuracy prediction as shown in the table is SVMs, with decision trees and Bayesian methods performing in the median range. CNNs on the other hand are topping the list with a 100% accuracy. </P>
</Sect>

<Sect>
<H5>Intelligent Educational and Tutoring Systems </H5>

<P>Nowadays, robots in education can perform a wide range of activities. Salem (2015:196) identifies IESs (intelligent educational systems), cognitive science, knowledge structure, intelligence authoring shells and interfaces, teaching and learning aspects, as key areas in which AI can be applied in education. According to Salem, ITSs (intelligent tutoring systems), educational robotics, and multimedia systems are the key systems of IES. IES can be categorised into two broad groups of AI integration. In this section, the role of AI and ML integration, predictive data analytics and big data technologies in advancing the capabilities of ITSs is explored. </P>

<P>ITSs refer to computer-based learning systems which utilise AI capabilities to deliver students’ personalised and adaptive learning instructions (Lin et al. 2023:2). The origin of the concept, according to Carbonell (1970:190), can be drawn back as far as the 1970s. Most ITSs are empowered by AI technology such as NLPs through which chatbots are used as an interactive platform to provide feedback and necessary insights pertaining to learning (Liddy 2001:12) through flipped learning environments (Lin &amp; Mubarok 2021:16). AI-driven ITSs are knowledge-based systems acting as intelligent tutors and can be used in real teaching, learning, and training environments to provide for personalised guidance and support to students. These systems analyse student performance data and adapt the learning content and assessments to cater to individual strengths and weaknesses. Among many benefits, these technologies boost the advantages of personalising students’ learning experiences for them. </P>

<P>Through PA, ITSs foster a student-centric approach (Bhushan et al. 2023) and allow for student learning to be personalised through an analysis of student datasets, highlighting the areas of improvement (gaps) as far as learning is concerned. Through the identified gaps, ITSs are able to tailor the instructional design preferences accordingly and stimulate learning engagement and emotions of students, improving their academic performance (Lin et al. 2023:2 of 22). The other associated benefits of ITSs are geared towards researchers who are interested in devising adaptive learning environments which, according to Papamitsiou and Economides (2014:42), can potentially promote individualised tutoring pedagogies through one-on-one support and guidance. Furthermore, Zawacki-Richter et al. (2019:3) in highlighting the importance of adaptive learning suggest that it gears its advantage towards adjusting learning content pace and difficulty in real time depending on students’ preferences and unique learning styles, level of comprehension, and previous performance. The other multifaceted benefits of ITSs as identified by Salem (2015:197) are depicted in Figure 5.4 below. </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_48.jpg"/>

<Caption>
<P>Figure 5.4: The multifaceted role of ITSs in education. (Source: Adapted from Salem 2015:197) </P>
</Caption>
</Figure>

<P>These range from enabling the possibility of employing flexible and tailored teaching methods and support measures, enhancing students’ learning experiences, creating and facilitating new kinds of learning experiences, as well as the distribution of training and learning materials for students, enabling authentic learning environments and collaboration. </P>
</Sect>

<Sect>
<H5>Automated Grading Systems </H5>

<P>Adopting automated grading systems remains a watershed moment in education. Like other teaching-enhanced systems and techniques, automated grading systems are significantly benefiting from big data and the AI-ML integration. The systems thrive on the availability of large volumes of data which are collected and draw a variety of insights from these data. It is on this purported insight that PA plays a crucial role by revealing students’ behavioural patterns and predicting their academic performances. The content vector analysis technique (Chauhan, Saharan, Singh, &amp; Sharma 2020:4 of 7) digs deep into students’ responses such as texts, essays, numeric answers, and in some cases images. Therefore, through ML and DL algorithm capabilities, Lin et al. (2023:3 of 22) signify that grading is automated, thus providing an added benefit of reducing routine tasks for teachers and markers of grading assessments. For students, they leverage timeous and personalised feedback. For academic staff, integrating automated grading means that inefficiencies inherent in manual grading systems are eliminated, the grading process is streamlined, and errors are minimised (Zhu, Lin, Jain, &amp; Zhou 2023:13344). </P>

<P>Notwithstanding the benefits of ITSs, their development is continually evolving with technological advancement. Therefore, this journey is not without shortcomings or implications, one of which includes accessibility and inclusivity. Therefore, more research is necessary to understand the implications and benefits of ITSs, especially when taking into cognisance students and staff from the marginalised and disadvantaged groups, especially those with learning difficulties and other forms of disabilities. Some of the associated challenges and ethical implications are explored next. </P>
</Sect>
</Sect>

<Sect>
<H4>Challenges and Ethical Implications of Using Big Data, AI, and Predictive Analytics Techniques </H4>

<P>Big data and AI technologies offer an advantage of allowing users the ability to accumulate, process, and integrate vast amounts of data from a variety of sources in no time. This capability has yielded a number of benefits as discussed in the previous section. Through data analytics tools, researchers are now more than ever before able to access, integrate, and leverage expertise and knowledge across various disciplines – from physics, science, and computing, to mathematics, biology, and statistics. Within biomedicine and the health fraternity, medical diagnosis and clinical solutions are also pointing to the success of AI and big data in the field of health (Madani, Arnaout, Mofrad, &amp; Arnaout 2018:1 of 8). </P>

<P>Despite the reported benefits, ethical dilemmas exist, and it is therefore important as Ben-Porath and Ben Shahar (2017:245) note that the implementation of big data and AI in education needs to be complemented by vigorous ethical frameworks and considerations to ensure a fair and transparent education process where equity is practised. </P>

<P>With ghost detection technologies, the downside here could be associated with false positives and false negatives. In the former, there could be false accusations of one purported to have used a ghostwriter when that was not the case, whereas with the latter systems could fail to detect when a ghostwriter has indeed been utilised. Lines (2016:889) warns that with an increasing and continuously evolving technology in the education space, digital plagiarism is on the rise, and subsequently ghostwriting services are improving and boast the potential of producing high-quality services, thus making them difficult to detect. This is akin to essay mills and contract cheating as identified by Medway, Roper, and Gillooly (2018) in the work they did on covert investigation within the HE sector of the UK (United Kingdom). Sharing her findings, Lines is alarmed that the majority of these essay mills could have been easily submitted for assessment without any act of plagiarism being detected (Lines 2016:889). For the purpose of the current chapter, fairness and bias in algorithmic systems and decision-making, student privacy, and governance issues are ethical challenges that are explored in detail alongside their mitigating strategies in the next three sections. </P>

<Sect>
<H5>Algorithmic Fairness and Bias in Academic Decision-Making </H5>

<P>It is undeniable that AI algorithms such as ML, DL, neural networks, and expert systems anchor and continue to transform educational processes and settings through NLPs (Hjuler 2019:4). The by-product of these algorithms which eventually becomes a critical ethical consideration is to ensure that academic decision-making is fair and free from bias. Algorithms used in assessment tools, grading systems, and even admission processes may inadvertently perpetuate biases present in historical data. For instance, if training data used to develop an AI system reflects historical disparities, the algorithm might inadvertently reinforce existing inequalities. This may well lead to an IHE’s practices being questioned under suspicion of stereotyping, which is purported to be an act of practising unfairness and discrimination. Therefore, the disadvantaged individuals from certain demographics of the population who may feel that they are being discriminated against may accuse the institution, thereby bringing its name into disrepute. This will then tarnish its image and integrity, consequently causing associates and alumni of the institution to suffer reputational damage. </P>

<P>The implications are that what may well be thought to be revolutionising processes, practices, and procedures through AI may inadvertently raise concerns about an institution’s fairness, equity, and the likelihood that certain demographics may be disadvantaged. IHEs therefore need to be proactive in devising measures that are aimed at addressing potential algorithmic bias and unfairness. Their AI systems and processes need to be regularly audited and refined in line with the best practices. Furthermore, these systems should abide by compliance requirements of the regulatory bodies such as the government, local authorities, and accreditation institutions (Mishra 2019:1025). Lastly, diversity forms an integral part of an inclusive environment where diverse perspectives should be incorporated into AI development and auditing. This will help to ensure that unintended consequences are mitigated and an equal and fair representation for both students and academic communities is achieved. </P>
</Sect>

<Sect>
<H5>Preservation of Student Privacy in the Era of AI </H5>

<P>Data-driven insights and decision-making are the cornerstones of AI advancement and big data (Hjuler 2019:66-67). In the era of AI, crucial issues in the application of AI in education centre around data. Integrating AI in the education space habitually and inevitably encompasses processes by which students’ data are collected and analysed. These are usually voluminous amounts of data, and in some cases, students are oblivious of this practice. Leslie (2019:5) talks of the invasion of privacy as one of the potential harms posed by AI systems owing to their design, development, and deployment procedures. For example, during the deployment of AI systems, data subjects can be targeted, nudged, and profiled without their knowledge or consent. This may lead to violations of data privacy and data breaches. </P>

<P>While student data can be invaluable for personalising learning experiences, it also raises ethical concerns related to privacy. Two elements worth mentioning here are mechanisms and practices of ensuring that the collected data are protected, and privacy is assured. Striking a delicate balance between leveraging the AI-geared insights and benefits is thus required of IHEs to guard against the misuse and unscrupulous handling of student data and the undue invasion of their privacy. This, therefore, calls for an honest and responsible use and preservation of student data whilst protecting their rights at the same time. The aim is to allow IHEs to leverage insights from data centralisation without student privacy being compromised. In an attempt to guard against unethical practices possibly compromising privacy, Akgun and Greenhow (2022:431) as well as Samad, Arshad, and Siraj (2021:17) through their PPDM (privacy preserving data mining) model recommend the following measures to preserve student privacy: </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Ensuring compliance-friendly data collection processes and practices. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Implementing and practising robust and secure measures towards protecting data, such as authorised access, anonymity, and informed consent. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Employing data privacy-preserving AI techniques, such as federated learning. </LBody>
</LI>
</L>
</Sect>

<Sect>
<H5>Practising AI Governance in Education and Safeguarding Academic Integrity: A Lens of Generative AI, Responsible AI, and Explainable AI </H5>

<P>Stephen Hawking was quoted as follows in Akgun and Greenhow (2022:431): ‘Success in creating AI would be the biggest event in human history. Unfortunately, it might also be the last, unless we learn how to avoid the risks.’ It cannot be denied that as AI is evolving, so is the proliferation of related tools. This evolution presents an abundance of opportunities in various sectors, and education is one of them. The emergence of and AI’s ascendancy in the education landscape has ushered in new transformative opportunities and challenges. The associated benefits range from data-driven insights, enhanced support for students and educators, improved and automated assessment methods, and personalised learning experiences (Akgun &amp; Greenhow 2022:431; Remian 2019:4). </P>

<P>Striking the right balance is one step towards distinguishing between responsible and ethical use of AI in education. Transparency and explainability fall within the domain of responsible AI and XAI (explainable artificial intelligence) (OECD 2023). Lin et al. (2023:2 of 22) expound that as much as XAI is gradually gaining attention within the realm of computer science, its popularity within the education space is still in the introductory stages. The outlined principles of XAI, as Faggella (2018) elucidates, strive to ensure that with AI integration, inclusive growth and sustainable development should be prioritised. As far as a people-centric view is concerned, the adoption of AI should be centred on humane values and fairness. XAI refers to a set of processes and methods that allow human users to comprehend and trust the ML algorithm’s outputs. Additionally, explainability can increase accountability and governance as practising trust in AI will potentially guarantee human value (Chamola, Hassija, Sulthana, Ghosh, Dhingra, &amp; Sikdar 2023:78998). </P>

<P>To keep up with the continuous demand of more accurate AI models, hard-to-explain (black-box) models are used. Not being able to explain these models makes it difficult to achieve user trust and to pinpoint challenges (bias, parameters, etc.), which can result in unreliable models that are difficult to scale. Due to these concerns, the practice of XAI is fast gaining momentum. Samoili et al. (2020:6) bring to the fore the issue of black-box AI, which is described as scenarios where the ability to track AI reasoning for certain decisions is marred with impossibilities. Such scenarios require XAI principles to provide some form of explanations for AI actions and decisions. </P>

<P>Arrieta, Díaz-Rodríguez, Del Ser, Bennetot, Tabik, Barbado, García, Gil-López, Molina, Benjamins, and Chatila (2020:83-84) caution that XAI is often confused with interpretability. The authors go further to explain that challenges impeding the development of shared understandings and meaning revolve around the interchangeable misapplication of the terms ‘interpretability’ and ‘explainability’ in scholarly works. These concepts exhibit distinct differences. Initially, interpretability pertains to the inherent trait of a model, signifying the extent to which the model is comprehensible for a human observer. This attribute is alternatively described as transparency. In contrast, explainability represents an active aspect of a model, encompassing any action or procedure undertaken by the model to elucidate or delineate its internal operations. Key stakeholders – students, educators, and policy makers – require a clear and shared understanding of how AI systems influence educational processes. </P>

<P>Transparency and Accountability </P>

<P>In the era of Gen-AI, transparency and accountability are cornerstones of ethical practice. Along the same lines, the <Link>OECD.AI Policy Observatory (OECD 2024)</Link> by the OECD (Organisation for Economic Cooperation and Development) goes further to identify value-based principles and associated recommendations for policy makers as depicted in Figure 5.5. </P>

<P>These principles can be enabled by XAI and responsible AI. Both transparency and accountability are the anchors of the corporate governance fraternity and have for the longest time been reference points for governance-friendly institutions (Tjano 2021:175). The King IV version of corporate governance by the IoDSA (Institute of Directors in South Africa) has also put an emphasis on these principles. With these principles in mind, the OECD’s position is promoting AI that instils innovation and trust, but most importantly ensures that human rights and democratic values are respected unconditionally (Perset 2024). Equal to the task of ensuring that the use of AI in education is free from bias and is fair, data are protected from misuse, and entrenching responsible AI, the other two governance principles worth practising are transparency and accountability. Responsible AI is underpinned by the principle that development, deployment, and the usage of AI systems are guided by ethical and responsible conduct. The ultimate end is striking a balance between AI benefits and risk reduction. This will provide an assurance that AI systems conform to social norms and human values, and can thus be embraced with confidence. Furthermore, this will also dilute the debate between AI and human intelligence. </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_49.jpg"/>

<Caption>
<P>Figure 5.5: OECD AI principles. (Source: OECD 2024) </P>
</Caption>
</Figure>

<P>The OECD recommended that institutions should gear trustworthy efforts towards the international cooperation for AI. Without exercising accountability, trust will be difficult to attain. A high-level expert group in Arrieta et al. (2020:105) highlights the auditability of AI algorithms, data, and the design process, redressing unjust practices, and minimising and reporting negative impacts of AI systems as the key attributes towards attaining accountability in the use of AI. In line with these principles and recommendations for policy makers, IHEs should equally be put to task to ensure that adherence to AI governance is beyond reproach. The following recommendations are provided: </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>There is a need to proactively communicate how AI is utilised in various aspects of education, from automated grading systems to adaptive learning platforms. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Transparent policies regarding the use of AI in assessment should be established, addressing concerns related to bias, data privacy, and the overall impact on academic integrity. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Mechanisms for accountability, such as regular audits and external reviews, should be in place to ensure that AI systems align with ethical standards and educational goals. </LBody>
</LI>
</L>

<P>Ownership of Content </P>

<P>Another element of the negative consequences of AI authoring tools is that of co-authoring which, according to Biermann (2022:22-23), poses a threat to the control, autonomy, and ownership at the expense of the author. As to who owns the content that comes from LLMs remains a debatable issue. The threat to ownership is exactly what the Australian writers’ guild was against as AI was understood to be threatening ownership of authors and their compensation. As previously stated, LLMs are part of supervised ML trained on massive amounts of data. The downside of this could have unintended biases and inaccuracies as outcomes. This point is affirmed by Poola (2023:16) who points out difficulties with LLMs and associated hallucinations. Poola adds that ChatGPT for example, has computational shortcomings, which could lead to misconceptions, errors of judgements, or incorrect interpretations (Poola 2023:16). The outcome of this may well be generated text that is misleading or harmful. </P>

<P>Fact-Checking </P>

<P>The superlative capability of LLMs lies in their ability to produce impressive fluent output, which research has indicated humans struggle to detect and distinguish from human-generated texts (Wahle, Ruas, Kirstein, &amp; Gipp 2022:12). Notwithstanding this unique and outstanding aura of performance, most of the models have limitations that cannot be overlooked. LLMs have been reported to hallucinate (Poola 2023:16), to lack semantic coherency, to have less diverse lexical ability (Gehrmann, Dai, Elder, &amp; Rush 2018:2), and to be limited to recent events and data (Perkins 2023:7). Nonetheless, researchers are hard at work trying to find solutions to overcome these issues and enhance the models’ accuracy and dependability (Poola 2023:16). </P>

<P>With these developments, generative performance transformers are being trained to stipulate their confidence level when generating output, whether it is factually incorrect or misinterpreted (Lin et al. 2023:3 of 22). Likewise, if one is using ChatGPT and is not satisfied with the output it generates, one can challenge it through correct and accurate facts. What the model does in these instances is to adjust its responses accordingly whilst learning at the same time. Therefore, given the ethical implications these models have on academic integrity, it is important that the output generated by these models is fact-checked for accuracy and correctness (Perkins 2023:7 of 24). </P>

<P>Ethical Utilisation of AI (AI Ethics) </P>

<P>Both XAI and responsible AI are influencing ethics with reference to the use of AI. Due to the reported range of harms, misuses, and abuses at individual and societal levels, the field of AI ethics emerged. The following are six forms of potential harms that can be caused by AI systems as identified by Leslie (2019:4-5): </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Bias and discrimination. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Denial of individual autonomy, recourse, and rights. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Non-transparent, unexplainable, or unjustifiable outcomes. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Invasions of privacy. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Isolation and disintegration of social connection. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Unreliable, unsafe, or poor-quality outcomes. </LBody>
</LI>
</L>

<P>The fourth strand of value-based principles as described by the OECD includes robust, secure, and safe measures which were covered in the previous section. Assuring honesty and ethical considerations in the use of data is of paramount importance. Once transparency is lacking, trust is by extension eroded, thus enabling an environment where ethical concerns fester. This closely connects with the fifth strand of the OECD’s principles on AI, which is accountability. Vigorous governance frameworks and mechanisms are necessary to ensure accountability in developing, deploying, and using AI. This includes defining roles and responsibilities, adhering to legal and ethical standards, and implementing processes for monitoring, auditing, and addressing any issues or risks that arise. </P>

<P>In addition to the above-discussed challenges, Johnson (2023) identifies more and more reliance on technology, a limitation in the amount of data for training AI algorithms, a lack of capacity development for users (for example, students, administrators, and educators), and high costs of AI-powered tools and techniques as among some of the challenges that may hinder IHEs’ quest to adopt these techniques. With these challenges in mind, this may affect inclusivity and access, which need keen consideration for all stakeholders involved. Luan et al. (2020:6-7 of 11) raise the issue of costs through the commercialisation of intelligent educational tools and systems and accede that novel features can be priced to also expand a revenue stream for developers. However, there must be a balance and a mutually beneficial arrangement between academics and the developers of these tools in the industry. </P>

<P>AI technologies and big data techniques can to an extent do good to governments, thus improving their efficiency and decision-making (Eggers, Schatsky, &amp; Viechnicki 2017:2; Martinho-Truswell 2018). Mehr (2017) observes that governments are yet to fittingly embrace the adoption just as much as the private sector did. This should be concerning considering the role played by governments in their developmental agenda and upliftment of the societies they serve. Therefore, these advancements have the potential to influence the public’s interaction with the government and public policy goals and development (Wirtz, Weyerer, &amp; Geyer 2018:596). For example, Martinho-Truswell (2018) states that traffic departments can use AI for assessing exam papers. This function could be in instances of government-run tests such as psychometric tests for employment, driving permits, and licenses. In some countries, governments use virtual assistants or chatbots to serve the public: According to the Institute of Public Administration Australia, their government relies on AI systems to predict crime, traffic congestions, and road maintenance needs, and then dispatch the necessary assistance. Mehr (2017) adds that the Australian government utilises chatbots to help citizens complete forms, schedule appointments, and search civil documents. Likewise in Estonia, the government uses AI systems through automated push services to interact with the public via chatbots to register new births and bereavements and provide updates on statistics such as unemployment (Moltzau 2020). In the next section, the future implications of AI and big data integration on the role of RERCs is explored. </P>
</Sect>
</Sect>

<Sect>
<H4>The Future Implications of AI and Big Data on Research Ethics Review Committees </H4>

<P>It is inevitable that industries are witnessing the increasing use of big data. However, just as this irresistible adoption is on the back of reported various benefits, there are equally associated implications. Luan et al. (2020:7 of 11) identify a number of major challenges and propose solutions in the area of research, policymaking, and industry. From a research perspective, the authors highlight that 1) the mode of teaching and learning is persistently transitioning to personalised and adaptive learning environments; 2) there is a single domain for educational AI research that is pro intelligent computing; and 3) machine-generated data need careful consideration and redesign. For policy-making, the paradigm shift from traditional teaching methods and techniques to the digital world implies a shift in policies and guidelines, therefore there is a dire need for data privacy and the protection of personal information against unauthorised access and undue commercial exploitation. From an industrial perspective, commercialisation and high costs of intelligent tools remain a big challenge as these affect inclusivity and access. </P>

<P>Kanika and Khan (2017:195) cite security issues and warn that these issues may potentially invade one’s privacy – be it an individual or institution. Taking cognisance that these issues may well transcend into the HE landscape, it is important that a risk-oriented mindset and ethical lens in harnessing the potential of use of AI in the big data space is adopted. This position is premised on the potential impact of AI and big data on the work of RERCs. </P>

<P>The work of Ferretti, Ienca, Velarde, Hurst, and Vayena (2021b:138) sheds light on challenges posed by big data research on the functions of RERCs, citing the magnitude of big data research projects. The authors argue that typical of these projects, they tend to have a scope that broadly requires a unique expertise, comprising new actors and making use of exceptional methodological approaches (Ferretti, Ienca, Sheehan, Blasimme, Dove, Farsides, Friesen, Kahn, Karlen, Kleist, &amp; Liao 2021a:4 of 13). With this peculiarity, RERCs are typically constituted of skills and expertise from a variety of backgrounds (Nichols 2016:351), thus enabling the infusion of cross-disciplinary expertise (Garrard &amp; Dawson 2005:419). Reviewers are grouped and get allocated content that matches their strengths and expertise. </P>

<P>The aim of the ethical review process is to ensure that ethical guidelines during the research process are adhered to by researchers, to guard against unethical or illegal acts (Dove 2020:4). All in all, the end goal is to validate ethical procedures of the research being undertaken (Biagetti, Gedutis, &amp; Ma 2020:1-2) to mitigate risks (Ferretti et al. 2021:8 of 13) and to reassure public and data subjects that the research process can be trusted (Resnik 2018:87). </P>

<P>In the era of AI and big data, it is important that the expertise of RERC members is assessed to ensure that review processes are not compromised. Once the process is compromised, trust and the validity of the research results and adopted methodologies are questioned. Ferretti et al. (2021a:8 of 13) identify persistent and novel weaknesses pertaining to failures of RERCs. Three weaknesses are identified, namely functional, purview, and persistent weaknesses, which are depicted in Figure 5.6. The functional and purview weaknesses are identified by the authors as novel, which implies that they are not linked with traditional research ethics and are specific to the nature of big data projects (Ferretti et al. 2021a:3-4 of 13). The functional weaknesses stem from RERCs’ inabilities and inadequacies to review big data research projects. The purview weaknesses on the other hand relate to big data projects that may likely skew the horizon of RERCs. </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_50.jpg"/>

<Caption>
<P>Figure 5.6: Weaknesses and reforms pertaining to the impact of AI and big data on the work of RERCs. (Source: Ferretti et al. 2021a:8 of 13) </P>
</Caption>
</Figure>

<P>The persistent weaknesses include a lack of transparency in the review process, capacity, a load to review projects, inconsistent review criteria, conflicting regulatory frameworks across jurisdictions, and a lack of a suitable environment to redistribute research risks and associated benefits. The proposed reforms to mitigate these weaknesses are also indicated in the figure above, and these include regulatory, procedural, and complementary reforms (Ferretti et al. 2021a:8 of 13). Having discussed the implications brought by big data research and AI on the functions of RERCs, the conclusion focuses on the future trends that can be expected from the evolution of AI and related challenges. </P>
</Sect>

<Sect>
<H4>Conclusion </H4>

<P>The ascendancy and transformative power of big data, AI, and PA in the field of education has caused the ethics of research, teaching, and related procedures to be undoubtedly reshaped. The chapter delved into the symbiotic relationship between algorithms and education, exploring the nuances of their implementation. </P>

<P>These include writing style development detecting abilities, authorship verification, ghostwriter detection tools in academic writing, and AI-text generation tools. The chapter has dissected the intricate balance between algorithms and educational outcomes, which has shed light on the implications and challenges associated with their integration into pedagogical practices. PA tools and their uses were discussed, including IESs, ITSs, and automated grading systems. From the perspective of research, responsive strategies on account of protecting and enhancing research and academic integrity were enacted. Through this lens, potential benefits, challenges, and possible governance measures of adopting and integrating big data, AI, and PA in education were discussed. These strategies are underpinned by the governance of AI, where protocols, guidelines and policy frameworks aimed at managing unethical conduct and malpractices in research were developed. This aligns with principles of responsible and ethical deployment, development, and application of AI tools. The work of RERCs was also not spared from this disruption. Various committees had to intensify their scrutiny of issues relating to ethical consideration in line with their respective institutions’ research ethics policies and guidelines. That said, this necessitated a need to focus on operationalising informed consent in the era of AI, big data, and PA. Other related principles such as confidentiality, prevention of harm, data privacy, addressing bias and discrimination in algorithms, transparency, and accountability also had to be re-examined. This journey of reform meant that power dynamics in the field of ethics review processes and research governance had to adjust with the times. In essence, these advancements necessitated the need for stronger and robust ethical frameworks to empower RERCs to minimise and mitigate ethical blurred lines. </P>

<P>As far as challenges are concerned, ethical concerns dominate the discourse in the literature. For this chapter, I draw on weaknesses categorised as functional, purview, and persistent, as documented by Ferretti et al. (2021a). The functional weaknesses relate to, among others, a lack of proper expertise in regulating and reviewing research projects within the big data and PA space. These challenges are trailed by difficulties in operationalising the informed consent, following ex-ante review models and an inability to comprehend unforeseeable risk assessments. In the purview category, challenges relate to difficulties of dealing with new actors conducting research, how to anonymise data and the broad scope of big data-related studies. The last category deals with persistency and challenges such as lack of transparency, conflicting regulations across various jurisdictions, and inconsistent review practices and criteria within and across various RERCs. </P>

<P>With these challenges, there are associated opportunities that arise. These may manifest themselves through reforms that may need to be instituted to mitigate risks and ethical concerns. These reforms can be categorised as regulatory, procedural, and complementary. The regulatory opportunities imply that there is a need to provide new guidance on the practice of ethics in this new era. Furthermore, the purview of RERCs needs to be expanded to bring in new expertise. Given that AI transcends beyond borders, there is also a need to specify foundational global standards for the assessment of ethics. Global bodies like the OECD, European Commission on AI, and UNESCO (United Nations Educational, Scientific, and Cultural Organisation) have already started in this regard. </P>

<P>From a procedural perspective, there is a need to introduce new ethical assessment tools and systems to guide the behaviour of various role players such as researchers, ethics managers, and review members. This may also mean that the membership of the RERCs needs re-examination and adjustment to ensure that there is a diversity of skills on the board, preferably the presence of external experts. From a complementary viewpoint, creating hybrid mechanisms for the review process will assist in mitigating the risks that come with the new era. This can be augmented by introducing new specialised committees to ensure that unfamiliar or too technical research projects related to AI and big data are properly assessed for ethical compliance. In parting shots, the multifaceted roles and capabilities of AI, big data, and PA in shaping the landscape have brought about challenges and opportunities. These changes mean that educators, research review committees, and educational institutions have to relook at their governance processes, protocols, and policy frameworks. </P>
</Sect>

<Sect>
<H4>References </H4>

<P>Akgun, S. &amp; Greenhow, C. 2022. Artificial intelligence in education: Addressing ethical challenges in K-12 settings. AI and Ethics 
<Link>2(3):431-440. https://doi.org/10.1007/s43681-021-00096-7 </Link>
</P>

<P>Alexandru, A., Alexandru, CA., Coardos, D., &amp; Tudora, E. 2016. Healthcare, big data and cloud computing. Management 1(2):123-131. </P>

<P>Ali, HIH. &amp; Alhassan, A. 2021. Fighting contract cheating and ghostwriting in higher education: Moving towards a multidimensional approach. Cogent Education
<Link> 8(1). 1885837. 18 pages. https://doi.or </Link>

<Link>g/10.1080/2331186X.2021.1885837 </Link>
</P>

<P>Arrieta, AB., Díaz-Rodríguez, N., Del Ser, J., Bennetot, A., Tabik, S., Barbado, A., García, S., Gil-López, S., Molina, D., Benjamins, R., &amp; Chatila, R., 2020. Explainable artificial intelligence (XAI): Concepts, taxonomies, opportunities and challenges toward responsible AI. Information fusion
<Link> 58:82-115. https://doi. </Link>

<Link>org/10.1016/j.inffus.2019.12.012 </Link>
</P>

<P>Assem, H., Ghariba, S., Makrai, G., Johnston, P., Gill, L., &amp; Pilla F. 2017. Urban water flow and water level prediction based on deep learning. In Altun, Y. (Ed.): Machine learning and knowledge discovery in databases
<Link>, 317-329. Cham: Springer. https://doi. </Link>

<Link>org/10.1007/978-3-319-71273-4_26 </Link>
</P>

<P>ATRIA. 2023. How does Chat GPT work? Atria Innovation. 5 January 2023. 
<Link>Available at: https://www.atriainnovation.com/en/how-does</Link>

<Link>chat-gpt-work/</Link>
. (Accessed on 12 January 2024). </P>

<P>Baradwaj, BK. &amp; Pal, S. 2012. Mining educational data to analyze students’ performance. International Journal of Advanced Computer Science and Applications
<Link>, 2(6):63-69. http://dx.doi.org/10.14569/ </Link>

<Link>IJACSA.2011.020609 </Link>
</P>

<P>Barlow, HB. 1989. Unsupervised learning. Neural Computation 1(3):295-311. 
<Link>https://doi.org/10.1162/neco.1989.1.3.295 </Link>
</P>

<P>Ben-Porath, S. &amp; Ben Shahar, TH. 2017. Introduction: Big data and education: Ethical and moral challenges. Theory and Research in Education
<Link> 15(3):243-248. https://doi. </Link>

<Link>org/10.1177/1477878517737201 </Link>
</P>

<P>Bhushan, M., Shingate, RS., Vyas, T., Naman, T., &amp; Shirsath, S. 2023. Intelligent tutoring system: Personalised learning 
<Link>plans with AI. Available at: https://www.researchgate.net/ </Link>

<Link>publication/375696884_intelligent_tutoring_system_ </Link>

<Link>personalised_learning_plans_with_ai</Link>
. 
<Link>https://doi. </Link>

<Link>org/10.13140/RG.2.2.36573.59369 </Link>
</P>

<P>Biagetti, MT., Gedutis, A., &amp; Ma, L. 2020. Ethical theories in research evaluation: Exploratory. Scholarly Assessment Reports 2(1):1-9. 
<Link>https://doi.org/10.29024/sar.19 </Link>
</P>

<P>Biermann, OC. 2022. Writers want AI collaborators to respect their personal values and writing strategies: A human-centered perspective on AI co-writing. Doctoral thesis, University of 
<Link>British Columbia, Vancouver. Available at: https://open.library. </Link>

<Link>ubc.ca/collections/ubctheses/24/items/1.0420422</Link>
. (Accessed on 15 June 2024). </P>

<P>bin Mat, U. &amp; Buniyamin, N. 2017. Using neuro-fuzzy technique to classify and predict electrical engineering students’ achievement upon graduation based on mathematics competency. Indonesian Journal of Electrical Engineering and Computer Science 5(3):684
<Link>690. https://doi.org/10.11591/ijeecs.v5.i3.pp684-690 </Link>
</P>

<P>Bromley, J., Guyon, I., LeCun, Y., Säckinger, E., &amp; Shah, R. 1993. Signature verification using a ‘siamese’ time delay neural network. Advances in Neural Information Processing Systems 6:737-744. 
<Link>https://doi.org/10.1142/S0218001493000339 </Link>
</P>

<P>Carbonell, JR. 1970. AI in CAI: An artificial-intelligence approach to computer-assisted instruction. IEEE Transactions on Man-Machine Systems
<Link> 11(4):190-202. https://doi.org/10.1109/ </Link>

<Link>TMMS.1970.299942 </Link>
</P>

<P>Chamola, V., Hassija, V., Sulthana, AR., Ghosh, D., Dhingra, D., &amp; Sikdar, </P>

<P>B. 2023. A review of trustworthy and explainable artificial intelligence (XAI). IEEE Access
<Link> 11:78994-79015. https://doi. </Link>

<Link>org/10.1109/ACCESS.2023.3294569 </Link>
</P>

<P>Chassignol, M., Khoroshavin, A., Klimova, A., &amp; Bilyatdinova, A. 2018. Artificial intelligence trends in education: A narrative overview. Procedia Computing Science
<Link> 136:16-24. https://doi.org/10.1016/j. </Link>

<Link>procs.2018.08.233 </Link>
</P>

<P>Chauhan, RK., Saharan, R., Singh, S., &amp; Sharma, P. 2020. Automated content grading using machine learning. arXiv:2004.04300. 7 
<Link>pages. https://doi.org/10.48550/arXiv.2004.04300 </Link>
</P>

<P>Chen, CP. &amp; Zhang, CY. 2014. Data-intensive applications, challenges, techniques and technologies: A survey on big data. Information Sciences
<Link> 275:314-347. https://doi.org/10.1016/j.ins.2014.01.015 </Link>
</P>

<P>Chen, L., Chen, P., &amp; Lin, Z. 2020. Artificial intelligence in education: A review. IEEE Access
<Link> 8:75264-75278. https://doi.org/10.1109/ </Link>

<Link>ACCESS.2020.2988510 </Link>
</P>

<P>Cheng, Y., Chen, K., Sun, H., Zhang, Y., &amp; Tao, F. 2018. Data and knowledge mining with big data towards smart production. Journal of Industrial Information Integration
<Link> 9:1-13. https://doi.org/10.1016/j. </Link>

<Link>jii.2017.08.001 </Link>
</P>

<P>Chicco, D. 2021. Siamese neural networks: An overview. In Cartwright, </P>

<P>H. (Ed.): Artificial neural networks, 73-94. Methods in 
<Link>Molecular Biology. Vol 2190. New York: Humana. https://doi. </Link>

<Link>org/10.1007/978-1-0716-0826-5_3 </Link>
</P>

<P>Choi, Y. &amp; McClenen, C. 2020. Development of adaptive formative assessment system using computerized adaptive testing and dynamic Bayesian networks. Applied Sciences
<Link> 10(22):1-17. https:// </Link>

<Link>doi.org/10.3390/app10228196 </Link>
</P>

<P>Chopra, S., Hadsell, R., &amp; LeCun, Y. 2005. June. Learning a similarity metric discriminatively, with application to face verification. 2005 IEEE Computer Society Conference On Computer Vision And Pattern Recognition (CVPR’05)
<Link> 1:539-546. https://doi.org/10.1109/ </Link>

<Link>CVPR.2005.202 </Link>
</P>

<P>Chu, C-T., Kim, SK., Lin, Y-A., Yu, YY., Bradski, G., Ng, A. &amp; Olukotun, K. 2006. Map-reduce for machine learning on multicore. Advances in Neural Information Processing Systems 19. 8 pages. Available at: 
<Link>http://books.nips.cc/nips19.html</Link>
. (Accessed on 12 January 2024). </P>

<P>Cox, M. &amp; Ellsworth, D. 1997. Managing big data for scientific visualization. IEEE, Proceedings of the 8th Conference on Visualization’97, 5-17. </P>

<P>Coyne, KP. 1986. Sustainable competitive advantage: What it is, what it isn’t. Business Horizons
<Link> 29(1):54-61. https://doi. </Link>

<Link>org/10.1016/0007-6813(86)90087-X </Link>
</P>

<P>De Vries, K. 2022. Let the robot speak! AI-generated speech and freedom of expression. In Hindelang, S. &amp; Moberg, A. (Eds.): YSEC Yearbook of Socio-Economic Constitutions 2021: Triangulating Freedom of Speech
<Link>, 93-115. Cham: Springer. https://doi. </Link>

<Link>org/10.1007/16495_2021_38 </Link>
</P>

<P>Deperlioglu, O. &amp; Birtil, FS. 2016. Analysis of girls vocational high school students’ academic failure causes with data mining techniques. The Anthropologist
<Link> 23(3):505-512. https://doi.org/10.1080/09720 </Link>

<Link>073.2014.11891970 </Link>
</P>

<P>Dove, ES. 2020. Regulatory stewardship of health research: Navigating participant protection and research promotion. Cheltenham: 
<Link>Edward Elgar Publishing. https://doi.org/10.4337/9781788975353 </Link>
</P>

<P>Draxler, F., Werner, A., Lehmann, F., Hoppe, M., Schmidt, A., Buschek, D., &amp; Welsch, R. 2023. The AI ghostwriter effect: Users do not perceive ownership of AI-generated text but self-declare as authors. arXiv preprint arXiv:2303.03283
<Link>. 40 pages. https://doi. </Link>

<Link>org/10.1145/3637875 </Link>
</P>

<P>Eggers, WD., Schatsky, D., &amp; Viechnicki, P. 2017. AI-augmented government: Using cognitive technologies to redesign public sector work. Deloitte Insights. 26 April 2017. Available at: 
<Link>https://www2.deloitte.com/us/en/insights/focus/cognitive</Link>

<Link>technologies/artificial-intelligence-government.html</Link>
. (Accessed on 21 January 2024). </P>

<P>Ekman, M. 2021. Learning deep learning: Theory and practice of neural networks, computer vision, natural language processing, and transformers using TensorFlow. Boston: Addison-Wesley Professional. </P>

<P>Emanuel, EJ. &amp; Wachter, RM. 2019. Artificial intelligence in health care: Will the value match the hype? JAMA
<Link> 321(23):2281-2282. https:// </Link>

<Link>doi.org/10.1001/jama.2019.4914 </Link>
</P>

<P>Faggella, D. 2018. What is artificial intelligence? An informed definition. Emerj Insights
<Link>. 21 December 2018. Available at: https://emerj. </Link>

<Link>com/ai-glossary-terms/what-is-artificial-intelligence-an</Link>

<Link>informed-definition/</Link>
. (Accessed on 13 January 2024). </P>

<P>Ferretti, A., Ienca, M., Sheehan, M., Blasimme, A., Dove, ES., Farsides, B., Friesen, P., Kahn, J., Karlen, W., Kleist, P., &amp; Liao, SM. 2021a. Ethics review of big data research: What should stay and what should be reformed? BMC Medical Ethics 22(1). 51. 13 
<Link>pages. https://doi.org/10.1186/s12910-021-00616-4 </Link>
</P>

<P>Ferretti, A., Ienca, M., Velarde, MR., Hurst, S., &amp; Vayena, E. 2021b. The challenges of big data for research ethics committees: A qualitative Swiss study. Journal of Empirical Research on Human Research Ethics
<Link> 17(1-2):129-143. https://doi/ </Link>

<Link>org/10.1177/15562646211053538 </Link>
</P>

<P>Flamm, K. 1988. Creating the computer: Government, industry, and high technology. Washington DC: Brookings Institution. </P>

<P>Garrard, E. &amp; Dawson, A. 2005. What is the role of the research ethics committee? Paternalism, inducements, and harm in research ethics. Journal of Medical Ethics
<Link> 31(7):419-423. https://doi. </Link>

<Link>org/10.1136/jme.2004.010447 </Link>
</P>

<P>Gehrmann, S., Dai, FZ., Elder, H., &amp; Rush, AM. 2018. End-to-end content and plan selection for data-to-text generation. arXiv preprint arXiv:1810.04700
<Link>. 11 pages. https://doi.org/10.18653/v1/W18-6505 </Link>
</P>

<P>Goh, K-L., Cheong, VS., Tan, KCK., &amp; Sharma, A. 2023. Ethical, legal and social issues arising from big data and artificial intelligence (AI) use in human biomedical research. New Castle University e-Prints. 
<Link>https://doi.org/10.57711/pgd8-ma17 </Link>
</P>

<P>Grand View Research. 2023. Digital transformation market to reach 
<Link>$4,617.78 billion by 2030. 8 August 2023. Available at: https:// </Link>

<Link>www.grandviewresearch.com/press-release/global-digital</Link>

<Link>transformation-market#</Link>
. (Accessed on 7 December 2023). </P>

<P>Hastie, T., Tibshirani, R., &amp; Friedman, J. 2009. Unsupervised learning. The elements of statistical learning: Data mining, inference, and prediction
<Link>. New York: Springer. https://doi.org/10.1007/978-0</Link>

<Link>387-84858-7_14 </Link>
</P>

<P>Heikkilä, M. 2023. Why detecting AI-generated text is so difficult (and what to do about it). MIT Technology Review. 7 February 
<Link>2023. Available at: https://www.technologyreview. </Link>

<Link>com/2023/02/07/1067928/why-detecting-ai-generated-text</Link>

<Link>-is-so-difficult-and-what-to-do-about-it/</Link>
. (Accessed on 13 January 2024). </P>

<P>Hjuler, N. 2019. Algorithms and AI in education. Doctoral dissertation, University of Copenhagen, Faculty of Science, Computer Science Department, Copenhagen. Available at:
<Link>https://researchprofiles. </Link>

<Link>ku.dk/en/publications/algorithms-and-ai-in-education</Link>
. (Accessed on 25 January 2024). </P>

<P>Hlosta, M., Herodotou, C., Papathoma, T., Gillespie, A., &amp; Bergamin, P. 2022. Predictive learning analytics in online education: A deeper understanding through explaining algorithmic errors. Computers and Education: Artificial Intelligence
<Link> 3. 100108. 12 pages. https:// </Link>

<Link>doi.org/10.1016/j.caeai.2022.100108 </Link>
</P>

<P>Hung, BT. 2021. Face recognition using hybrid HOG-CNN approach. Research in Intelligent and Computing in Engineering: Select Proceedings of RICE 2020
<Link>, 715-723. https://doi.org/10.1007/978</Link>

<Link>-981-15-7527-3_67 </Link>
</P>

<P>Janiesch, C., Zschech, P., &amp; Heinrich, K. 2021. Machine learning and deep learning. Electronic Markets
<Link> 31(3):685-695. https://doi. </Link>

<Link>org/10.1007/s12525-021-00475-2 </Link>
</P>

<P>Jantawan, B. &amp; Tsai, CF. 2013. The application of data mining to build classification model for predicting graduate employment. International Journal of Computer Science and Information Security 
<Link>11(10):1-7. https://doi.org/10.48550/arXiv.1312.7123 </Link>
</P>

<P>Johnson, R. 2023. The role of artificial intelligence in e-learning: Integrating AI tech into education. Atria Innovation. 27 October 
<Link>2023. Available at: https://elearningindustry.com/role-of</Link>

<Link>-artificial-intelligence-in-elearning-integrating-ai-tech-into</Link>

<Link>-education</Link>
. (Accessed on 14 January 2024). </P>

<P>Kanika, AA. &amp; Khan, RA. 2017. Security integration in big data life cycle. In Singh, M., Gupta, P., Tyagi, V., Sharma, A., Ören, T., &amp; Grosky, W. (Eds.): Advances in computing and data sciences, 192-200. ICACDS 2016. Communications in Computer and Information Science. Vol </P>

<P>721.
<Link> Singapore: Springer. https://doi.org/10.1007/978-981-10</Link>

<Link>-5427-3_21 </Link>
</P>

<P>Karabulut, EM., Özel, SA., &amp; Ibrikci, T. 2012. A comparative study on the effect of feature selection on classification accuracy. Procedia Technology
<Link> 1:323-327. https://doi.org/10.1016/j. </Link>

<Link>protcy.2012.02.068 </Link>
</P>

<P>Khan, MA. &amp; Vivek, MA. 2022. Artificial intelligence and big data: </P>

<P>The advent of new pedagogy in the adaptive e-learning system in the higher educational institutions of Saudi Arabia. Education Research International
<Link> 2022:1-10. https://doi. </Link>

<Link>org/10.1155/2022/1263555 </Link>
</P>

<P>Khan, S., Ali, H., Ullah, Z., &amp; Bulbul, MF. 2018. An intelligent monitoring system of vehicles on highway traffic. 2018 12th International Conference on Open Source Systems and Technologies (ICOSST), 71
<Link>75. https://doi.org/10.1109/ICOSST.2018.8632192 </Link>
</P>

<P>Khan, S., Nazir, S., García-Magariño, I., &amp; Hussain, A. 2021. Deep learning-based urban big data fusion in smart cities: Towards traffic monitoring and flow-preserving fusion. Computers &amp; Electrical Engineering
<Link> 89. 106906. 11 pages. https://doi.org/10.1016/j. </Link>

<Link>compeleceng.2020.106906 </Link>
</P>

<P>Kirchner, JH., Ahmad, L., Aaronson, S., &amp; Leike, J. 2023. New AI classifier for indicating AI-written text. OpenAI. 16 April 2023. Available at: 
<Link>https://openai.com/blog/new-ai-classifier-for-indicating-ai</Link>

<Link>-written-text</Link>
. (Accessed on 4 January 2024). </P>

<P>Kotsiantis, SB., Zaharakis, I., &amp; Pintelas, P. 2007. Supervised machine learning: A review of classification techniques. Informatica 31:249-268. </P>

<P>Kumar, AS. 2016. Edifice an educational framework using educational data mining and visual analytics. IJ Education and Management Engineering
<Link> 2:24-30. https://doi.org/10.5815/ijeme.2016.02.03 </Link>
</P>

<P>Kumar, CK. 2018. Artificial Intelligence: Definition, types, examples, 
<Link>technologies. 31 August 2018. Available at: https:// </Link>

<Link>chethankumargn.medium.com/artificial-intelligence</Link>

<Link>-definition-types-examples-technologies-962ea75c7b9b</Link>
. (Accessed on 9 January 2024). </P>

<P>Leslie, D. 2019. Understanding artificial intelligence ethics and safety: A guide for the responsible design and implementation of AI systems in the public sector. Zenodo
<Link>. 11 June 2019. https://doi. </Link>

<Link>org/10.5281/zenodo.3240529 </Link>
</P>

<P>Liddy, ED. 2001. Natural language processing. Encyclopaedia of Library and Information Science. 2nd ed. New York: Marcel Dekker. </P>

<P>Lin, CC., Huang, AYQ., &amp; Lu, OHT. 2023. Artificial intelligence in intelligent tutoring systems toward sustainable education: A systematic review. Smart Learning Environments
<Link> 10(1). 22 pages. https://doi. </Link>

<Link>org/10.1186/s40561-023-00260-y </Link>
</P>

<P>Lin, CC., Kuo, CH., &amp; Chiang, HT. 2021. CNN-based classification for point cloud object with bearing angle image. IEEE Sensors journal 
<Link>22(1):1003-1011. https://doi.org/10.1109/JSEN.2021.3130268 </Link>
</P>

<P>Lin, CJ. &amp; Mubarok, H. 2021. Learning analytics for investigating the mind map-guided AI chatbot approach in an EFL flipped speaking classroom. Educational Technology and Society 24(4):16-35. </P>

<P>Lines, L. 2016. Ghostwriters guaranteeing grades? The quality of online ghost-writing services available to tertiary students in Australia. Teaching in Higher Education
<Link> 21(8):889-914. https://doi.org/10.10 </Link>

<Link>80/13562517.2016.1198759 </Link>
</P>

<P>Lorenzen, SS., Hjuler, NOD., &amp; Alstrup, S. 2019. Investigating writing style development in high school. Proceedings of the 12th International Conference on Educational Data Mining (EDM 2019), 572-575. 
<Link>https://doi.org/10.48550/arXiv.1906.03072 </Link>
</P>

<P>Luan, H., Geczy, P., Lai, H., Gobert, J., Yang, SJ., Ogata, H., Baltes, J., Guerra, R., Li, P., &amp; Tsai, CC. 2020. Challenges and future directions of big data and artificial intelligence in education. Frontiers in Psychology 11. 580820. 11 pages. 
<Link>https://doi. </Link>

<Link>org/10.3389/fpsyg.2020.580820 </Link>
</P>

<P>Ma, Y. &amp; Guo, G. (Eds.). 2014. Support vector machines applications. Cham: 
<Link>Springer Science. https://doi.org/10.1007/978-3-319-02300-7 </Link>
</P>

<P>Madani, A., Arnaout, R., Mofrad, M., &amp; Arnaout, R. 2018. Fast and accurate view classification of echocardiograms using deep learning. NPJ Digital Medicine
<Link> 1(1). 8 pages. https://doi.org/10.1038/s41746</Link>

<Link>-017-0013-1 </Link>
</P>

<P>Manne, R. &amp; Kantheti, SC. 2021. Application of artificial intelligence in healthcare: Chances and challenges. Current Journal of Applied Science and Technology
<Link> 40(6):78-89. https://doi.org/10.9734/ </Link>

<Link>cjast/2021/v40i631320 </Link>
</P>

<P>Martinho-Truswell, E. 2018. How AI could help the public sector. Harvard Business Review
<Link>. 26 January 2018. Available at: https://hbr. </Link>

<Link>org/2018/01/how-ai-could-help-the-public-sector</Link>
. (Accessed on 14 January 2024). </P>

<P>Medway, D., Roper, S., &amp; Gillooly, L. 2018. Contract cheating in UK </P>

<P>higher education: A covert investigation of essay mills. British Educational Research Journal 44:393-418. 
<Link>https://doi.org/10.1002/ </Link>

<Link>berj.3335 </Link>
</P>

<P>Mehr, H. 2017. Artificial intelligence for citizen services and government. Available at:
<Link>https://ash.harvard.edu/files/ash/files/artificial_ </Link>

<Link>intelligence_for_citizen_services.pdf</Link>
. (Accessed on 16 January 2024). </P>

<P>Mishra, R. 2019. Usage of data analytics and artificial intelligence in ensuring quality assurance at higher education institutions. 2019 Amity International Conference on Artificial Intelligence (AICAI), 
<Link>1022-1025. https://doi.org/10.1109/AICAI.2019.8701392 </Link>
</P>

<P>Mishra, T., Kumar, D., &amp; Gupta, S. 2016. Students’ employability prediction model through data mining. International journal of applied engineering research 11(4):2275-2282. </P>

<P>Moltzau, A. 2020. Estonia’s national strategy for artificial-intelligence. 2 
<Link>January 2020. Available at: https://medium.com/swlh/estonias</Link>

<Link>-national-strategy-for-artificial-intelligence-2623259ddf4c</Link>
. (Accessed on 10 January 2024). </P>

<P>Muhammad, I. &amp; Yan, Z. 2015. Supervised machine learning approaches: A survey. ICTACT Journal on Soft Computing
<Link> 5(3):946-952. https:// </Link>

<Link>doi.org/10.21917/ijsc.2015.0133 </Link>
</P>

<P>Murty, MN. &amp; Raghava, R. 2016. Support vector machines and perceptrons: Learning, optimization, classification, and application to social networks
<Link>. Cham: Springer. https://doi.org/10.1007/978-3-319</Link>

<Link>-41063-0 </Link>
</P>

<P>Nam, T. &amp; Pardo, TA. 2011. Smart city as urban innovation: Focusing on management, policy, and context. Proceedings of the 5th International Conference on Theory and Practice of Electronic Governance
<Link>, 185-194. https://doi.org/10.1145/2072069.2072100 </Link>
</P>

<P>Naveenkumar, S. 2023. Transforming education with machine learning: Personalized learning, intelligent tutoring, and educational 
<Link>games. 25 February 2023. Available at: https://www.linkedin. </Link>

<Link>com/pulse/transforming-education-machine-learning</Link>

<Link>-personalized-intelligent-s/</Link>
. (Accessed on 10 January 2024). </P>

<P>Neha, K. &amp; Sidiq, SJ. 2020. Analysis of student academic performance through expert systems. International Research Journal on Advanced Science Hub
<Link> 2(Special Issue ICIES 9S):48-54. https:// </Link>

<Link>doi.org/10.47392/irjash.2020.158 </Link>
</P>

<P>Nichols, AS. 2016. Research ethics committees (RECs)/institutional review boards (IRBs) and the globalisation of clinical research: Can ethical oversight of human subjects research be standardized? Washington University Global Studies Law Review 15:351-379. </P>

<P>O’Leary, DE. 2013. Artificial intelligence and big data. IEEE intelligent systems 
<Link>28(2):96-99. https://doi.org/10.1109/MIS.2013.39 </Link>
</P>

<P>OECD (Organisation for Economic Co-operation and Development). 2023. The impact of AI on the workplace: Main findings from the OECD AI surveys of employers and workers. Available at: https://www.oecd.org/employment-outlook/2023/#ai-jobs. (Accessed on 12 December 2023). </P>

<P>OECD (Organisation for Economic Co-operation and Development). 2024. 
<Link>Principles for trustworthy AI. Available at: https://oecd.ai/en/ai</Link>

<Link>-principles</Link>
. (Accessed on 12 December 2023). </P>

<P>Papamitsiou, Z. &amp; Economides, AA. 2014. Learning analytics and educational data mining in practice: A systematic literature review of empirical evidence. Journal of Educational Technology &amp; Society
<Link> 17(4):49-64. https://doi.org/10.1111/bjet.12747 </Link>
</P>

<P>Patgiri, R. &amp; Ahmed, A. 2016. Big data: The V’s of the game changer paradigm. 2016 IEEE 18th International Conference on High-Performance Computing and Communications, IEEE 14th International Conference on Smart City, and IEEE 2nd International Conference on Data Science and Systems (HPCC/SmartCity/DSS), Sydney, 17-24. 
<Link>https://doi.org/10.1109/HPCC-SmartCity-DSS.2016.0014 </Link>
</P>

<P>Perkins, M. 2023. Academic integrity considerations of AI large language models in the post-pandemic era: ChatGPT and beyond. Journal of University Teaching &amp; Learning Practice
<Link> 20(2). 24 pages. https:// </Link>

<Link>doi.org/10.53761/1.20.02.07 </Link>
</P>

<P>Perset, K. 2024. A pioneering year for AI and the OECD. OECD-AI. 9 January 
<Link>2024. Available at: https://oecd.ai/en/wonk/2023. (Accessed on </Link>
14 January 2024). </P>

<P>Petrosyan, A. 2023. Internet and social media users in the world 2023. Statista
<Link>. 25 October 2023. Available at: https://www.statista.com/ </Link>

<Link>statistics/617136/digital-population-worldwide/</Link>
. (Accessed on 12 January 2024). </P>

<P>Poola, I. 2023. Overcoming ChatGPT’s inaccuracies with pre-trained AI prompt engineering sequencing process. International Journal of Technology and Emerging Sciences 3(3):16-19. </P>

<P>Qian, C., He, T., &amp; Zhang, R. 2017. Deep learning based authorship 
<Link>identification. Available at: https://www.semanticscholar.org/ </Link>

<Link>paper/DeepLearning-based-Authorship-Identification-QianHe/ </Link>

<Link>ab0ebe094ec0a44fb0013d640b344d8cfd7adc81</Link>
. (Accessed on 15 January 2024). </P>

<P>Quan, Z. &amp; Pu, L. 2023. An improved accurate classification method for online education resources based on support vector machine (SVM): Algorithm and experiment. Education and Information Technologies
<Link> 28(7):8097-8111. https://doi.org/10.1007/s10639</Link>

<Link>-022-11514-6 </Link>
</P>

<P>Rane, J., Kaya, O., Mallick, SK., &amp; Rane, NL. 2024. Smart farming using artificial intelligence, machine learning, deep learning, and ChatGPT: Applications, opportunities, challenges, and future directions. Generative Artificial Intelligence in Agriculture, Education, and Business
<Link>, 218-272. https://doi.org/10.70593/978</Link>

<Link>-81-981271-7-4_6 </Link>
</P>

<P>Remaida, A., Moumen, A., El Bouzekri El Idrissi, Y., &amp; Abdellaoui, B. 2021. Tuning convolutional neural networks hyperparameters for offline handwriting recognition. Proceedings of the 2nd International Conference on Big Data, Modelling and Machine Learning (BML 2021)
<Link>, 71-76. https://doi. </Link>

<Link>org/10.5220/0010728600003101 </Link>
</P>

<P>Remian, D. 2019. Augmenting education: Ethical considerations for incorporating artificial intelligence in education. Master’s dissertation, faculty of the Instructional Design Master’s Degree Program University of Massachusetts at Boston. Available 
<Link>at: https://scholarworks.umb.edu/instruction_capstone/52. </Link>
(Accessed on 15 January 2024). </P>

<P>Resnik, DB. 2018. The ethics of research with human subjects: Protecting people, advancing science, promoting trust. Cham: Springer. 
<Link>https://doi.org/10.1007/978-3-319-68756-8 </Link>
</P>

<P>Salem, ABM. 2015. Towards of intelligence education and learning. IEEE 7th International Conference on Intelligent Computing and Information Systems (ICICIS), Cairo
<Link>, 196-202. https://doi.org/10.1109/ </Link>

<Link>IntelCIS.2015.7397221 </Link>
</P>

<P>Samad, AA., Arshad, MM., &amp; Siraj, MM. 2021. Towards enhancement of privacy-preserving data mining model for predicting students learning outcomes performance. IEEE International Conference on Computing (ICOCO),
<Link> November 2021, 13-18. https://doi. </Link>

<Link>org/10.1109/ICOCO53166.2021.9673544 </Link>
</P>

<P>Samoili, S., López Cobo, M., Gómez, E., De Prato, G., Martinez-Plumed, F., &amp; Delipetrev, B. 2020. AI watch – defining artificial intelligence </P>

<P>– towards an operational definition and taxonomy of artificial intelligence. Luxembourg: Publications Office of the European 
<Link>Union. https://data.europa.eu/doi/10.2760/382730 </Link>
</P>

<P>Sen, PC., Hajra, M., &amp; Ghosh, M. 2020. Supervised classification algorithms in machine learning: A survey and review. In Mandal, </P>

<P>J. &amp; Bhattacharya, D. (Eds.): Emerging technology in modelling and graphics: Advances in intelligent systems and computing, 99-111. 
<Link>Singapore: Springer. https://doi.org/10.1007/978-981-13-7403</Link>

<Link>-6_11 </Link>
</P>

<P>Sharma, A. &amp; Mansotra, V. 2019. Deep learning based student emotion recognition from facial expressions in classrooms. International Journal of Engineering and Advanced Technology 8(6):4691-4699. 
<Link>https://doi.org/10.35940/ijeat.F9170.088619 </Link>
</P>

<P>Sheth, A. 2014. Transforming big data into smart data: Deriving value via harnessing volume, variety, and velocity using semantic techniques and technologies. IEEE 30th International Conference on Data Engineering (ICDE), Chicago, 2014
<Link>, 2. https://doi.org/10.1109/ </Link>

<Link>ICDE.2014.6816634 </Link>
</P>

<P>Sivarajah, U., Kamal, MM., Irani, Z., &amp; Weerakkody, V. 2017. Critical analysis of big data challenges and analytical methods. Journal of Business Research
<Link> 70:263-286. https://doi.org/10.1016/j. </Link>

<Link>jbusres.2016.08.001 </Link>
</P>

<P>Solarwinds Pingdom. 2012. Internet 2011 in numbers. 17 January 2012. 
<Link>Available at: http://royal.pingdom.com/2012/01/17/internet-2011-</Link>

<Link>in-numbers</Link>
. (Accessed on 12 January 2024). </P>

<P>Solarwinds Pingdom. 2013. Internet 2012 in numbers. 17 January 2013. 
<Link>Available at: https://www.pingdom.com/blog/internet-2012-in</Link>

<Link>-numbers</Link>
. (Accessed on 12 January 2024). </P>

<P>Stamatatos, E. 2009. A survey of modern authorship attribution methods. Journal of the American Society for information Science and Technology
<Link> 60(3):538-556. https://doi.org/10.1002/asi.21001 </Link>
</P>

<P>Stavngaard, M., Sørensen, A., Lorenzen, S., Hjuler, N., &amp; Alstrup, S. 2019. Detecting ghostwriters in high schools. arXiv preprint arXiv:1906.01635. 6 pages. </P>

<P>Taigman, Y., Yang, M., Ranzato, MA., &amp; Wolf, L. 2014. Deepface: Closing the gap to human-level performance in face verification. Proceedings of the IEEE conference on computer vision and pattern recognition
<Link> (CVPR), 1701-1708. https://doi.org/10.1109/ </Link>

<Link>CVPR.2014.220 </Link>
</P>

<P>Takyar, A. 2022. AI use cases &amp; applications across major industries. LeewayHertz-Software Development Company Publications. 
<Link>Available at: https://www.leewayhertz.com/ai-use-cases-and</Link>

<Link>-applications/</Link>
. (Accessed on 12 December 2023). </P>

<P>Tjano, RN. 2021. An empirical study of corporate governance and sustainability reporting practices in South African state-owned entities. Doctoral thesis. University of South Africa. Pretoria. 
<Link>Available at: https://uir.unisa.ac.za/handle/10500/28099. </Link>
(Accessed on 12 January 2024). </P>

<P>Turing, AM. 1950. Computing machinery and intelligence. Mind
<Link> 59(236):433-460. https:// </Link>

<Link>doi.org/10.1093/mind/LIX.236.433 </Link>
</P>

<P>Venugopal, R. &amp; Mamatha, V. 2023. Impact of artificial intelligence (AI) on teaching and learning in India’s higher education sector. IOSR Journal of Research &amp; Method in Education
<Link> 13(5):1-6. https://doi. </Link>

<Link>org/10.9790/7388-1305020106 </Link>
</P>

<P>Wahle, JP., Ruas, T., Kirstein, F., &amp; Gipp, B. 2022. How large language models are transforming machine-paraphrased plagiarism. Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing (EMNLP)
<Link>, 1-12. https://doi.org/10. </Link>

<Link>48550/arXiv.2210.03568 </Link>
</P>

<P>Wang, F. &amp; Preininger, A. 2019. AI in health: State of the art, challenges, and future directions. Yearbook of Medical Informatics 28(1):16-26. 
<Link>https://doi.org/10.1055/s-0039-1677908 </Link>
</P>

<P>Wirtz, BW., Weyerer, JC., &amp; Geyer, C. 2019. Artificial intelligence and the public sector – applications and challenges. International Journal of Public Administration
<Link> 42(7):596-615. https://doi.org/10.1080/0 </Link>

<Link>1900692.2018.1498103 </Link>
</P>

<P>Xiong, Y., He, Y., Huang, H., Yu, C., &amp; Jing, X. 2020. Air quality statistics and prediction based on urban agglomerations and sentiment analysis of people under different pollutants. In Wang, Y., Fu, M., Xu, L., &amp; Zou, J. (Eds.): Signal and information processing, networking and computers. lecture notes in electrical 
<Link>engineering, 78-87. Vol 628. Singapore: Springer. https://doi. </Link>

<Link>org/10.1007/978-981-15-4163-6_10 </Link>
</P>

<P>Zawacki-Richter, O., Marín, VI., Bond, M., &amp; Gouverneur, F. 2019. Systematic review of research on artificial intelligence applications in higher education – where are the educators? International Journal of Educational Technology in Higher Education 
<Link>16(39):1-27. https://doi.org/10.1186/s41239-019-0171-0 </Link>
</P>

<P>Zhang, C., Jiang, W., Zhang, Y., Wang, W., Zhao, Q., &amp; Wang, C. 2022. Transformer and CNN hybrid deep neural network for semantic segmentation of very-high-resolution remote sensing imagery. IEEE Transactions on Geoscience and Remote Sensing 60:1-20. 
<Link>https://doi.org/10.1109/TGRS.2022.3144894 </Link>
</P>

<P>Zhou, Q., Gu, J-J., Ling, C., Li, W-B., Zhuang, Y., &amp; Wang, J. 2020. Exploiting multiple correlations among urban regions for crowd flow prediction. Journal of Computer Science and Technology 
<Link>35:338-352. https://doi.org/10.1007/s11390-020-9970-y </Link>
</P>

<P>Zhu, Z., Lin, K., Jain, AK., &amp; Zhou, J. 2023. Transfer learning in deep reinforcement learning: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence
<Link> 45(11):13344-13362. https://doi. </Link>

<Link>org/10.1109/TPAMI.2023.3292075 </Link>
</P>

<P>Zimmerman, AA. 2023. Ghostwriter for the masses: ChatGPT and the future of writing. Annals of Surgical Oncology 30:3170-3173. 
<Link>https://doi.org/10.1245/s10434-023-13436-0 </Link>
</P>
</Sect>
</Sect>
</Sect>

<Sect>
<H2 id="LinkTarget_10759">Section 2 </H2>

<P>Generative AI in Higher Education: Revolutionising Teaching, Assessment, </P>

<Sect>
<H3>and Integrity </H3>

<Link><Figure id="LinkTarget_10760">

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_51.jpg"/>
</Figure>
</Link>
</Sect>
</Sect>

<Sect>
<H2>Chapter 6 </H2>

<Sect>
<H3>Transforming Higher Education: The Profound Impact of Generative Artificial Intelligence on Teaching and Learning in the ChatGPT Era </H3>

<P>Geesje van den Berg </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_52.jpg"/>
</Figure>
</Link>

<Link>
<P>Department of Curriculum and Instructional Studies University of South Africa Pretoria, South Africa </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_53.jpg"/>
</Figure>
</Link>

<Sect>
<H4>Introduction </H4>

<P>Educational practices in the 21st century have undergone a significant transformation due to technological advances, particularly in artificial intelligence (AI). Recent developments and expansion in machine learning led to the emergence of generative artificial intelligence (Gen-AI), an advanced and innovative technology (Hu 2022). Lim, Gunasekara, Pallant, Pallant, and Pechenkina (2023:2 of 13) define Gen-AI as a technology that </P>

<P>1) uses deep learning models to 2) generate human-like content (e.g., images and words) in response to 3) complex and varied prompts (e.g., languages, instructions, and questions). It can do so in response to a wide range of prompts that may be complex and diverse, including different languages, instructions, or questions. In other words, Gen-AI can produce human-like outputs when given various types of input. </P>

<P>ChatGPT (chat generative pre-trained transformer), an implementation of Gen-AI, developed and released by OpenAI in November 2022, uses publicly available digital content data to produce text in multiple languages that is very similar to human writing. OpenAI released ChatGPT 3.5 in November, followed by a more powerful and capable version to perform more complex functions, ChatGPT 4, in March 2023. In January 2024, it had around 180 million users and is a rapidly growing and widely used platform (OpenAI 2024). Upon registration, both versions are readily available for use without any prior training required. While the first version is free, ChatGPT 4 currently costs $20 monthly (OpenAI 2024). The powerful language model, which is still an emerging innovation, has already shown that it can display creativity in its writing, from a single paragraph to a complete research paper, on almost any topic (Aydin &amp; Karaarslan 2022:22). It has made educational resources more accessible and extended the reach of higher education beyond traditional classroom settings or local settings, creating new opportunities for both lecturers and students (Celik 2023:4). </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_54.jpg"/>
</Figure>
</Link>

<P>The scalability and accessibility of Gen-AI enable a global reach in HE (higher education), breaking geographical barriers and making quality education available to students worldwide. These developments further represent a significant advancement in smart learning, enhancing the ability to offer personalised and engaging learning experiences (Pesovski, Santos, Henriques, &amp; Trajkovik 2024:2). ChatGPT plays a pivotal role in enhancing the learning environment by tailoring responses to each student’s unique needs and preferences, as highlighted in a recent study by Rawas (2023:2). Furthermore, the capabilities of ChatGPT, such as automated grading and feedback, ease the burden on educators, enabling them to focus on more valuable tasks, such as facilitating discussions, building social engagements, and providing specialised support that cannot be provided by Gen-AI (Conijn, Kahr, &amp; Snijders 2023:41). </P>

<P>Despite the capabilities of ChatGPT (and similar technologies such as Google’s Bard and Copilot by Microsoft), it should be considered along with the risks and challenges to ensure its ethical and responsible use in HE. For example, Adiguzel, Kaya, and Cansu (2023:1) mention ethical issues such as bias and the need to train educators to successfully incorporate Gen-AI tools as challenges in using ChatGPT. Additionally, Rawas (2023:2) adds that due to the significant amount of data ChatGPT uses, privacy and security issues are a top priority when using the tool. As ChatGPT is an AI-generated assistant that facilitates communication between students and educators, it is important to ensure that its use does not have negative consequences. To achieve this, adequate monitoring by educators and accountability on the side of students are needed. This will ensure that all interactions are appropriate, respectful, ethical, and safe. Therefore, it is the responsibility of both students and educators to use Gen-AI tools such as ChatGPT with care and consideration to avoid any intentional and unintentional harm that may arise from using the tool. In doing so, the tool remains useful to all users. </P>

<P>The use of Gen-AI tools such as ChatGPT in HE has gained significant attention from universities worldwide as a disruptive tool for teaching and learning, as well as for supporting their staff and students (e.g., Conijn et al. 2023:39; Wong 2024:2). Similarly, IHEs (institutions of higher education) have actively explored the potential of incorporating Gen-AI tools into their pedagogical approaches. The aim is to enhance the quality of education by offering personalised learning experiences that address the unique needs of each student (Rasul, Nair, Kalendra, Robin, De Oliveira Santini, Ladeira, Sun, Day, Rather, &amp; Heathcote 2023:43). Such an approach recognises that Gen-AI technology has the potential to transform traditional teaching methods, making learning more interactive, engaging, and effective. With the help of Gen-AI tools such as ChatGPT, IHEs can create customised learning paths that are tailored to the individual needs, interests, and learning styles of each student. </P>

<P>By analysing student performance data and feedback, ChatGPT algorithms can identify strengths and areas that need more support. This will, in turn, provide them with targeted interventions to help them overcome their challenges. Such personalised learning approaches can help students stay motivated and engaged, which could lead to better outcomes. However, due to the recent emergence of Gen-AI tools, the academic community is still in the process of determining the most efficient and responsible ways to incorporate such tools into HE. Ongoing research in this area is needed. </P>

<P>This chapter offers insights into how Gen-AI tools like ChatGPT could revolutionise HE. From a theoretical and interpretative analysis based on an integrative literature review (Hambaloyi &amp; Jordan 2016:121), recent articles (mostly published in 2023) focusing on the transformative role of Gen-AI in HE, with specific reference to ChatGPT, were analysed. The following search string was used on Google Scholar to search for articles: Higher education AND generative AI OR ChatGPT AND opportunities AND transformation AND teaching AND learning. From the literature search, the limited investigation into the topic was confirmed, as noted by Rawas (2023:3). Most studies revolve around broader AI in education, education in general, or the use of specific tools. To further refine the search, articles focussing on these topics were excluded for the purpose of this chapter, although they might have had relevance. Based on the literature review, this chapter aims to investigate the unique opportunities and challenges that ChatGPT presents for transforming HE. </P>
</Sect>

<Sect>
<H4>Opportunities of ChatGPT in Higher Education </H4>

<P>The emergence of Gen-AI tools is the key to progress innovation and can revolutionise HE. It can create exciting new opportunities for students, educators, and IHEs alike if they understand its possibilities and realities. With its cutting-edge technology and advanced natural language processing capabilities, ChatGPT can open previously unexplored avenues of teaching and learning. </P>

<P>Based on the literature reviewed (as indicated below), this section discusses some of the promising options that ChatGPT provides. The studies below meet the inclusion criteria and provide an insightful glimpse into how ChatGPT is changing, and could increasingly revolutionise teaching and learning in HE in the future. However, it is worth mentioning that these studies are not meant to be an exhaustive list of all research done in 2023 in this area. The table below shows the authors and their respective countries for the selected papers. Of importance is that, although the search string was limited to opportunities, the chosen articles also highlight challenges that come along with them to provide a balanced perspective. The opportunities and challenges identified in the articles are listed below, and a discussion follows. </P>

<Table>
<Caption>
<P>Table 6.1: Authors of papers indicating opportunities and challenges identified in their papers </P>
</Caption>

<TR>
<TH>Author(s) </TH>

<TH>Opportunities </TH>

<TH>Challenges </TH>
</TR>

<TR>
<TD>Rawas (2023) </TD>

<TD>Personalised learning. Interactive learning. </TD>

<TD>Bias. Ethics. </TD>
</TR>

<TR>
<TD>(Lebanon) </TD>

<TD>Automated grading. Intelligent tutoring. Content creation. Language learning. Accessibility. </TD>

<TD>Lack of human interaction. Technical issues. Cost and implementation. </TD>
</TR>

<TR>
<TD>Rasul et al. (2023) (Australia, Brazil, India) </TD>

<TD>Adaptive learning. Individualised feedback. Research, writing, and data analytics support. Administrative support. Assessment support. </TD>

<TD>Ethics. Equity considerations. Academic integrity. Potential bias. False information. Evaluation of graduate skill sets. Assessment of learning outcomes. </TD>
</TR>

<TR>
<TD>Adiguzel et al. </TD>

<TD>Language translation. </TD>

<TD>Ethics. </TD>
</TR>

<TR>
<TD>(2023) </TD>

<TD>Text summarisation. Question answering. </TD>

<TD>Privacy. System performance </TD>
</TR>

<TR>
<TD>(Turkey) </TD>

<TD>Creative writing. Content creation. Responses to prompts. Explanation of complex subjects, concepts, or themes. Error correction in existing code. Generation of new codes. </TD>

<TD>issues. Inaccuracy of content. Academic integrity. </TD>
</TR>

<TR>
<TD>Vargas-Murillo, De la Asuncion, and De Jesús Guevara-Soto (2023) (Peru) </TD>

<TD>Enhancement of the teaching and learning process. Generation of new ideas. </TD>

<TD>Overreliance on AI. Ethics. Inaccuracy of content. Loss of ability to produce original content. Academic integrity. Loss of critical thinking skills. Academic dishonesty. Cheating in exams. </TD>
</TR>
</Table>

<Table>
<TR>
<TH>Author(s) </TH>

<TH>Opportunities </TH>

<TH>Challenges </TH>
</TR>

<TR>
<TD>Mhlanga (2023) </TD>

<TD>Personalised learning. Provision of </TD>

<TD>Bias. Lack of empathy. </TD>
</TR>

<TR>
<TD>(South Africa) </TD>

<TD>educational materials. Widening participation. Language support. Improved efficiency. Cost-effectiveness. Assessment support. </TD>

<TD>Lack of creativity. Overreliance on AI. Limited domain knowledge. Connectivity. </TD>
</TR>

<TR>
<TD>Gill, Patros, Wu, Kaur, Kaur, Fuller, Singh, Arora, Parlikad, and Stankovski (2024) (UK, China, New Zealand, Canada, USA, India, Slovenia, Australia, Austria) </TD>

<TD>Personalised support. Language assistance. Customised learning. Active learning/flipped learning. Participatory learning. </TD>

<TD>Inaccuracy of content. Plagiarism. Bias. Ethics. </TD>
</TR>
</Table>

<P>Lastly, in addition to the work of the above researchers, the following question was asked to ChatGPT 4: Considering its opportunities and challenges, how could ChatGPT transform teaching and learning in higher education? The response included the opportunities and challenges indicated in Table 6.2. </P>

<P>Table 6.2: Opportunities and challenges identified by ChatGPT </P>

<Table>
<TR>
<TH>ChatGPT </TH>

<TH>Opportunities </TH>

<TH>Challenges </TH>
</TR>

<TR>
<TD>ChatGPT (OpenAI 2024) </TD>

<TD>Personalised learning. Accessibility. Inclusivity. Research and writing. Supplemental teaching tool. Interactive learning environments. </TD>

<TD>Academic integrity. Quality assurance. Ethics and privacy. Teacher and student adaptation. Overreliance on AI. </TD>
</TR>
</Table>

<P>Based on the concepts provided in Tables 6.1 and 6.2, ChatGPT created the following word cloud on the opportunities of ChatGPT. It indicated that it used the Python application to create it. The word cloud visually represents the key terms and concepts, with more frequent terms appearing larger in the cloud. It provides a quick and insightful view of the main themes and opportunities associated with ChatGPT. </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_55.jpg"/>

<Caption>
<P>Figure 6.1: Word cloud of the opportunities of ChatGPT in Higher Education. (Source: OpenAI 2024) </P>
</Caption>
</Figure>

<P>Similar to the opportunities, ChatGPT was asked to create a word cloud for the challenges listed in the table. </P>

<P>The purpose of the word clouds was to extract and visualise key information from the table presenting ChatGPT’s benefits and challenges in transforming HE. The most prominent or recurring concepts are clearly visible as they appear in the larger text. The word clouds also helped to identify the most important aspects of the opportunities in the following discussion. </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_56.jpg"/>

<Caption>
<P>Figure 6.2: Word cloud of the challenges of ChatGPT in Higher Education. (Source: OpenAI 2024) </P>
</Caption>
</Figure>

<Sect>
<H5>Discussion of Opportunities and Challenges in Transforming Higher Education </H5>

<P>Opportunities </P>

<P>Through the analysis of opportunities presented by ChatGPT in transforming HE as they occurred and were discussed in the articles, several insightful perspectives emerged. These perspectives offer a deeper understanding of how ChatGPT can be used to enhance teaching and learning in HE. One recurring theme that emerged was the opportunity for personalised learning and support, which was often mentioned as its most valuable advantage (cf. Gill et al. 2024:20). Personalised learning and support refer to an approach that tailors learning experiences to the unique needs of individual students through unique responses and resources (Yang, Hwang, &amp; Yang 2013:185). According to Chiu, Moorehouse, Chai, and Ismailov (2023:11), such a personalised, often step-by-step approach to teaching and learning, including timely feedback, encourages self-directed learning and self-reflection through identifying and learning students’ behaviour and errors. This customisation makes the learning experience more relevant and engaging, encouraging students to take control of their learning journey. For example, it can allow students to spend more time on challenging tasks and quickly skim through materials and content that they already understand, fostering a sense of autonomy in learning. It can further stimulate curiosity by providing materials that align with the interests of a student. </P>

<P>ChatGPT can analyse students’ learning styles, preferences, and requirements to offer customised feedback and content, leading to more effective and engaging learning experiences (Rawas 2023:5; OpenAI 2024). As such, Vargas-Murillo et al. (2023:123) refer to ChatGPT as an auxiliary tool to help students complete certain tasks based on their unique preferences. This personal content and feedback are possible because computer algorithms and AI use the interaction of a student with previous content to determine future materials and trends (Kerr 2016:89). An important aspect of these algorithms highlighted by Mhlanga (2023:6) is that ChatGPT can use them according to the specific requirements and preferences of each student. Because Gen-AI tools learn from vast amounts of text data, they can predict the most appropriate responses. This learning capability of ChatGPT makes it possible to adapt its responses based on the input of the user, making the learning experience more personalised (OpenAI 2024). </P>

<P>Personalised and adaptive learning can enhance student engagement and motivation (Rasul et al. 2023:44; Mhlanga 2023:3). Educators and students can access ChatGPT for on-demand feedback and content, depending on their needs. ChatGPT is human-like and conversational; therefore, students are more likely to feel engaged and valued. This ability to offer interactive learning experiences has many advantages, as ChatGPT promptly responds to questions and allows for follow-up conversations and question-and-answer opportunities (Rawas 2023:5; OpenAI 2024). Active participation in the learning process can positively impact the learning experience and improve the overall quality of education. ChatGPT can help in many ways in this regard, as it uses normal language to generate ideas for research, assessments, and writing tasks. Using ChatGPT, educators can also gain valuable insight into student engagement with course content and identify areas for improvement. This can help the educators to adapt their teaching approaches and improve the learning outcomes of their students. Therefore, ChatGPT’s ability to offer low-cost, automated support can transform HE, as it can offer high-level, personalised, on-demand support to both educators and students. </P>

<P>Related to personalised learning is the opportunity offered by ChatGPT for individualised feedback to both students and educators. Based on the student’s prompts, ChatGPT can provide individualised feedback, making learning a more rewarding experience. Additionally, ChatGPT has the potential to detect errors and guide students in how to improve their learning (Rasul et al. 2023:45). Rather than relying on traditional onesize-fits-all feedback approaches, students can build their own understanding of concepts and content. However, it is important for both students and educators to know that ChatGPT can provide incorrect information and that its accuracy greatly depends on accurate prompts (Rudolph, Tan, &amp; Tan 2023:352). It is also true that accurate prompts can produce inaccurate responses. Therefore, relying on ChatGPT without acknowledging its limitations can lead to the dissemination of incorrect or misleading information, which negatively affects academic integrity. The implication is that in the era of Gen-AI, humans, in this case, educators and students, are still essential to check the accuracy of the information generated by tools such as ChatGPT. Individualised feedback can further reduce costs, as the use of human tutors could be reduced to provide student feedback (Rasul et al. 2023:45). </P>

<P>However, as Rawas (2023:5) argues, ChatGPT can contribute to personalised learning by acting as a valuable personal tutor for students. This is especially applicable in courses with large numbers of students. As a personal tutor, it can perform a range of tasks, such as answering complex questions, helping students to practise conversations, summarising key concepts, and preparing them for assessments by offering practice questions. In addition, it can provide motivational support by being always available. This is specifically relevant to those who, for different reasons, cannot attend the class. However, Gen-AI tools such as ChatGPT should be used as partners for humans and not as replacements, since checks for accuracy, correctness, relevance, and ethical use are necessary. </P>

<P>LLMs (large language models) such as ChatGPT can assist both educators and students in initiating ideas or completing writing and research tasks by providing ideas and support (Rasul et al. 2023:45; Vargas-Murillo et al. 2023:132; OpenAI 2024). ChatGPT can be an efficient assistant by providing basic information on any topic of interest. It can also assist in conducting initial literature reviews by summarising research articles and generating draft versions of research articles, providing a solid foundation to build on (Rudolph et al. 2023:342). This implies that ChatGPT can act as a partner or research assistant in research activities. However, as Rasul et al. (2023:45) caution, this should be done ethically, while academic integrity should not be compromised. Writing support is a valuable resource for students and educators who work in a language different from their home language (Rudolph et al. 2023:343). However, this should be approached with caution, as ChatGPT can provide incorrect information and fabricate references (Sok &amp; Heng 2024). </P>

<P>Rasul et al. (2023:46) mention automated administrative support as a benefit offered by ChatGPT as a possible way of teaching and learning support to transform HE. This aspect is relevant to this chapter because such support can contribute to student satisfaction and success: ‘ChatGPT can help students overcome administrative challenges while also providing them with precise and timely information’ (Rasul et al. 2023:46). This support is extremely valuable in distance education contexts where students are not necessarily on campus and may easily find themselves lost and isolated. Without this kind of assistance, they may have to travel to get the needed support. If ChatGPT is integrated with an institution’s learning management system, it can take ownership of administrative tasks by providing customised information on teaching and learning issues, such as issues with upcoming due dates, incomplete tasks, and progress reports. </P>

<P>According to Lo, Hew, and Jong (2024:2 of 21), AI technologies can assist in administrative tasks by detecting student disengagement. These tasks can alleviate educators’ administrative burdens and contribute to a more effective and successful teaching and learning experience for both educators and students. </P>

<P>As another advantage, ChatGPT can be integrated into innovative assessment activities to promote a student-centred environment. Innovative assessment contributes to a more comprehensive and meaningful evaluation of student learning, as stated in the study conducted by Rasul et al. (2023:46). For instance, ChatGPT can be used to create various types of questions, such as multiple choice, short answer, and case studies, and can be used to evaluate the assessments created by ChatGPT. Mhlanga (2023:8) adds that ChatGPT has the ability to understand the larger context of the assignment, assess students’ work, and provide personalised feedback. It can analyse the content, structure, and style of the assessment, pointing out suggestions for improvement and helping students understand their specific strengths and weaknesses. This approach can help to develop the critical thinking skills of educators and students and encourage them to engage with ChatGPT. </P>

<P>The use of ChatGPT in assessments requires innovation, creativity, and authenticity in the assessment setting. This involves moving away from traditional assessments that often rely on the memorisation of content. The integration of ChatGPT can promote collaboration and problem-solving skills. Students can, for example, use ChatGPT to brainstorm ideas, clarify concepts, and seek guidance, which could foster teamwork and collaboration. However, designing such innovative assessment tasks requires skilled educators. Related to the setting of assessments is the ability of ChatGPT to assist with automated grading. Grading assignments, tests, and examinations can be automated, offering educators more time for teaching-related tasks. Automated grading further provides faster feedback and minimises biases in grading (Rawas 2023:5). </P>

<P>ChatGPT has gained a reputation for creating high-level educational content such as lecture notes, summaries, and explanations (Rawas 2023:6). Educators can partner with ChatGPT to generate comprehensive lecture notes that are interactive and engaging. With its vast knowledge base and natural language processing capabilities, ChatGPT can create explanations, summaries, or examples if needed, making it easier for educators to create interactive and comprehensive learning materials. Mhlanga (2023:5) adds that ChatGPT can help educators to provide high-quality educational resources and insightful information. These capabilities would improve the effectiveness of teaching and learning by providing much-needed resources that may not otherwise be available. </P>

<P>In addition to its collaboration with educators, students can benefit from the ability of ChatGPT to create content. They can use it to find relevant sources and summarise research papers, which can save them time and effort. The language model can further help students with tasks such as outlining, structuring, and developing arguments for essays. These tasks are crucial for academic success, and the language model can provide valuable guidance, making the writing process more manageable and effective. However, these processes must be ethical and transparent, and the use of chatbots in such writing processes must be acknowledged by students. </P>

<P>As a language model, ChatGPT can help students with language-related tasks. For example, it can help students learn a language by offering conversation partners and language models (Rawas 2023:6). It can stimulate conversations in the target language, helping students to practise speaking and comprehension. Students can learn new words and phrases through interaction with ChatGPT, which can, in turn, provide examples of how these words are used in context, thus assisting with comprehension. It can further assist students in correcting their sentences, practising their pronunciation, grasping sentence structure, and giving accurate text interpretations (Gill et al. 2024:20). It additionally helps students by explaining grammatical rules in a user-friendly way. When students ask a specific question about grammar they will receive a detailed explanation. This means that students and educators can produce basic texts and ask ChatGPT to transform and correct them in the desired style or format. Because ChatGPT has been trained in a variety of languages, it is possible for students who may struggle with concepts in the language of teaching and learning to still benefit from the educational experience (Mhlanga 2023:7). These recent developments have enabled language support to students, making higher education possible for all students, regardless of their backgrounds or the language they speak. This means that ChatGPT can give a translation and explanation of the language on demand. The tool can also assist with language-related tasks such as text summarisation, interpretation, and creative writing, like poems or fiction (Hutson &amp; Schnellmann 2023:3). </P>

<P>The last possibility discussed in this section is that ChatGPT can expand access to quality education for all students, regardless of location, time of day, or personal circumstances. For example, Mhlanga (2023:2), referring to emerging markets, shares that ChatGPT can address challenges such as students living in deep rural areas with limited resources, including a shortage of educators and limited access to quality education. </P>

<P>Furthermore, access to personalised information due to Gen-AI tools makes it easier for students with disabilities who otherwise would not have access. Although ChatGPT is largely a text-based model, it can work alongside tools such as screen readers for visually impaired students or simplified explanations for students with cognitive disabilities. By providing everyone with access to quality education, ChatGPT can help to bridge educational gaps and create a more inclusive learning environment for all students. </P>

<P>Challenges </P>

<P>The integration of Gen-AI tools in the field of HE has the potential to bring about significant transformation. This transformation can manifest itself in many ways and can lead to numerous benefits for both educators and students in HE. However, despite the many benefits, several challenges have been discussed in the reviewed literature and are discussed next. </P>

<P>According to Rawas (2023:7), one of the biggest challenges facing ChatGPT is related to ethical issues such as bias. This </P>

<P>is because it may display partiality towards certain groups depending on the data on which it has been trained. For example, if a LLM is only trained on specific data sets, it could reinforce misunderstandings and result in students constructing inaccurate information and perceptions (Adiguzel et al. 2023:6). Therefore, it is crucial to ensure that the training data used are diverse and representative to avoid such challenges. It is important to note that ChatGPT, as an AI-powered assistant, may lack the cultural sensitivity and nuances needed that exist in certain countries and languages. Therefore, it could provide responses that are not culturally suitable, especially in countries with different cultures or where this is a sensitive matter (Mhlanga 2023:10). </P>

<P>Students may not always be able to evaluate the information they receive from ChatGPT, which may lead to a lack of development of needed knowledge and skills and even the holistic growth of students. Furthermore, Mhlanga (2023:9) points out that those who develop technologies such as ChatGPT may deploy them to support their own biases, consciously or not, which may harm students’ learning and growth. For this reason, humans need to be aware of this and partner with technology to ensure that Gen-AI tools are used as intended and do not disadvantage anyone. </P>

<P>Another ethical issue is related to privacy and security, which have been noted as key challenges of ChatGPT (Rawas 2023:8; Adiguzel et al. 2023:6). Because ChatGPT generates text on the input it receives, there is a risk that these data could be exposed or misused, especially if proper data handling and privacy protocols are not in place. Additionally, student and educator data must be protected by secure systems, as ChatGPT has access to large amounts of data. In this regard, Chan (2023:14) suggests that IHEs must have policies in place to protect their content. </P>

<P>Additional ethical issues are transparency and accountability. Users’ online information is often used to train and improve Gen-AI models such as ChatGPT. Concerns arise about how these data are stored, who can access them, and for what purposes they are used, especially if they contain personal or sensitive information. Many users, including educators and students, provide this kind of data, assuming that it is treated as confidential. Therefore, Rawas (2023:8) emphasises that institutions must be transparent and honest about using ChatGPT (and other Gen-AI tools) and hold themselves accountable for any decisions based on Gen-AI suggestions. Students should also give their consent after being informed how ChatGPT is used and how their data are handled, keeping in mind the data protection laws in different countries (such as the Protection of Personal 
<Link>Information Act in South Africa and the General Data Protection </Link>

<Link>Regulation in the European Union). Ensuring compliance with </Link>
such laws can be challenging given the global nature of the internet and the diverse user base of ChatGPT. </P>

<P>From an ethical perspective, the pedagogical impact of ChatGPT is another challenge that needs to be considered. The use of ChatGPT must be in line with educational objectives and principles and should not negatively affect the role of educators (Rawas 2023:10). For example, using ChatGPT as the only means of assessing student learning outcomes may impede social interaction and the ability to construct meaning through reflection, collaboration, and human interaction, as pointed out by Rasul et al. (2023:49). Therefore, a combination of human intelligence and AI may be the optimal solution to address this challenge. </P>

<P>One of the major hurdles that ChatGPT has to overcome is its lack of human interaction and empathy. While ChatGPT has the ability to offer personalised learning and feedback and acts human-like, it cannot replace collaboration and social interaction with humans and is unable to show empathy toward students or educators (Rawas 2023:7; Mhlanga 2023:9). If students are mostly relying on ChatGPT for their academic needs, they might miss out on emotional support, the opportunity for collaboration, socialisation, and discussions that are crucial for their overall development (Rasul et al. 2023:48). This might be stressful for students who have difficulty understanding certain concepts or even entire subjects. It might lead to isolation, a lack of motivation, and further overreliance on technology. While ChatGPT can provide valuable information, it cannot establish a relationship between students and educators or among students (Mhlanga 2023:9). These relationships motivate and inspire students to study and perform at their best. </P>

<P>The increasing use of Gen-AI tools in HE raises concerns about the potential impact on student learning and development. For example, Rasul et al. (2023:47) and Vargas-Murillo (2023:124) argue that students may rely more on feedback from ChatGPT at the expense of their educators due to an overreliance on technology. This could be due to the human-like nature of ChatGPT and its ability to facilitate communication, which can make students feel more comfortable and confident in their interactions with the tool. However, an overreliance on AI tools could lead to a decrease in the development of critical thinking and problem-solving skills. This is because students may be more likely to accept the feedback provided by ChatGPT without questioning it or engaging in deep analysis to ensure correct and reliable feedback and information (Tiwari, Bhat, Khan, Subramaniam, &amp; Khan 2024:348). As a result, educators should work in conjunction with AI tools to ensure that students receive accurate and reliable feedback and information while also encouraging them to develop critical thinking and problem-solving skills. Relying too much on ChatGPT can have adverse effects on the growth of graduate skills, such as critical thinking, problem solving, collaboration, global and cultural awareness, and leadership. This means that using ChatGPT requires a balanced approach that upholds academic integrity and freedom while promoting graduate skill development (Rasul et al. 2023:49). In this regard, Vargas-Murillo et al. (2023:123) suggest that students should be ethical in deciding whether they want to use ChatGPT to complement their learning, as well as how much input the tool should be allowed to influence their work and skills development. In this way, exploitation is avoided, and they can learn how to use the tool effectively and avoid abusing it. 
Additionally, by using Gen-AI tools as a supplement to, rather than a replacement for, traditional teaching methods, educators can help students to receive a well-rounded education that prepares them for the world of work. </P>

<P>One of the significant challenges of integrating ChatGPT into the HE sector is maintaining academic integrity. This concern </P>

<P>arises mainly from the possibility that students may cheat during exams or use ChatGPT to generate essays, research papers, or assignments that should be their own writing. They can also engage in plagiarism by copying and pasting information from ChatGPT without properly referencing it, negatively impacting their learning and resulting in academic integrity violations (Vargas-Murillo et al. 2023:131; Mhlanga 2023:2). Related to plagiarism is the issue of intellectual property. If students submit Gen-AI content, the question arises: Whose content is this? Does the student, ChatGPT, or the institution to which the work has been submitted own the content? </P>

<P>The issue of plagiarism detection has become increasingly challenging for educators, also because accessing content and answers on ChatGPT is quick and very easy. Furthermore, Gen-AI produces content that is not easily distinguishable from text written by humans. This poses a significant challenge, as the technology to detect AI-generated texts is not always accurate (Raschka 2023; Mhlanga 2023:2; Gill et al. 2024:20). Even if students are allowed to use Gen-AI tools in their assessments, they will have an unfair advantage over those who are not using it. This raises issues of fairness because it undermines the integrity of the assessment process. Therefore, a culture of academic integrity and ethical use of ChatGPT should be emphasised (Sullivan, Kelly, &amp; McLaughlan 2023:34). When students are made aware of the importance of authentic learning, they will have a better understanding of the importance of using their own ideas and building their own arguments. </P>

<P>Technical problems and connectivity are recorded as another challenge (Rawas 2023:7; Mhlanga 2023:7), as issues such as system failures and challenges related to internet access negatively affect access to technology and Gen-AI tools. Vogels (2021) refers to digital inequity, which occurs when access to high-speed internet and technologies is not evenly distributed, which could widen the digital divide. The disparity in access becomes evident when some students have access to advanced tools such as ChatGPT for learning and research while others do not. When students lack access to the knowledge provided by Gen-AI tools such as ChatGPT, the knowledge gap is further widened. This gap can result in unequal learning opportunities and outcomes, contributing to increased educational inequalities. It is essential to recognise that Gen-AI tools should be available to everyone, regardless of their background or circumstances. In order to bridge the gap of inequalities, IHEs must make efforts to provide access to digital tools to all their students. This will help to ensure that everyone has the opportunity to benefit from the advancements of technology and contribute to a more inclusive and diverse digital community (Lim et al. 2023:8 of 13; Gill et al. 2024:21). This is particularly relevant for developing areas where many students face difficulties in accessing electricity and the internet. </P>

<P>The last challenge identified in this chapter is the ability of Gen-AI tools to produce false or inaccurate information. This could include providing, for example, incorrect facts, summaries of literature reviews, or references, which can be misleading to students instead of assisting them. To overcome this challenge, Rasul et al. (2023:49) suggest that students and educators should be aware of these inaccuracies to produce accurate information. Vargas-Murillo et al. (2023:131) also warn against the so-called ‘hallucination effect,’ in which AI tools generate information or responses that are factually incorrect or not related to the prompt it received. This is because ChatGPT can only present data on which it has been trained and does not have access to real-time data. For this reason, it may sometimes provide outdated information (OpenAI 2024). Furthermore, it may misunderstand a prompt, leading to irrelevant responses, specifically with complex, ambiguous, or poorly structured prompts or questions (OpenAI 2024). This challenge should remind both educators and students of the need for human verification to evaluate the information and verify its accuracy and correctness. This is definitely important in educational and academic contexts where accurate information is crucial. </P>

<P>Implications </P>

<P>From the literature review, it is clear that HE is going through a transformation process, which presents both opportunities and challenges. The findings expose a complex landscape with numerous significant implications for policy, theory, and practice. The most important implications of this research are stated below. </P>
</Sect>

<Sect>
<H5>Transforming pedagogical practices </H5>

<P>Gen-AI tools such as ChatGPT necessitate the rethinking of teaching, learning, and assessment approaches. They should include guidance, mentoring, knowledge application, and reflection rather than teaching content. Such approaches should aim to foster critical thinking and problem-solving skills. Gen-AI tools diminish the importance of previous teaching and learning strategies, and both educators and students will need to be taught how to use the indicated technologies effectively and ethically. Institutions should carefully plan how to integrate Gen-AI into their current systems, such as learning management systems, student systems, and related operational systems. This also implies that training is needed on how to use these technologies. </P>
</Sect>

<Sect>
<H5>Combining human and Gen-AI strengths </H5>

<P>The use of ChatGPT in HE requires a balanced approach. AI should be used for efficiency, while human elements should be retained for empathy, creativity, critical thinking, and QA (quality assurance). Combining the roles of humans and AI can lead to an interactive relationship in which each complements the capabilities of the other. This chapter demonstrates how Gen-AI and humans can collaborate to produce text. </P>
</Sect>

<Sect>
<H5>Developing ethical and privacy frameworks </H5>

<P>To ensure that the use of Gen-AI tools in IHEs is ethical, unbiased, and respects privacy, robust frameworks and policies need to be developed and implemented to address ethical, bias, and privacy concerns regarding AI use. </P>
</Sect>

<Sect>
<H5>Providing AI literacy training </H5>

<P>IHEs should provide adequate training opportunities to educators and students to enable them to critically assess information and use AI efficiently and ethically. This will ensure that everybody is able to take advantage of the opportunities that Gen-AI tools offer and avoid ethical dilemmas that might arise from their use. </P>
</Sect>

<Sect>
<H5>Bridging digital divides </H5>

<P>In the digital age, bridging the gap between those with access to Gen-AI tools and those without is crucial. Efforts are therefore needed to ensure equitable access to Gen-AI tools so that no institution, educator, or student is left behind. </P>
</Sect>

<Sect>
<H5>Preparing for future educational needs </H5>

<P>Integrating Gen-AI tools such as ChatGPT hints at future developments and skills development needs, emphasising agility and lifelong learning. IHEs must therefore equip academics and students with the necessary knowledge and skills to navigate an increasingly technology-driven environment. This is of utmost importance for IHEs in preparing students for the modern workforce. </P>

<P>The implications for HE, based on the opportunities and challenges identified in this research, can be summarised as follows: </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_57.jpg"/>

<Caption>
<P>Figure 6.3: The implications of using generative artificial intelligence tools to transform Higher Education. (Source: Personal archive) </P>
</Caption>
</Figure>
</Sect>
</Sect>

<Sect>
<H4>Conclusion </H4>

<P>This chapter aimed to offer insights into how Gen-AI tools such as ChatGPT could transform HE. It used an integrative literature review and focused on opportunities and challenges. The findings revealed that the transformative potential of ChatGPT in HE is significant and will continue to play a crucial role in shaping HE. From the literature review, several opportunities were discussed, namely personalised learning and support, improved student engagement and motivation, individualised feedback, assistance with writing and research, automated administrative support, innovative evaluation, educational content creation, language learning support, and expansion of access. With these opportunities, various challenges were also identified. These are ethical issues and bias, privacy and security concerns, transparency and accountability, a lack of human interaction and empathy, an overreliance on technology, academic integrity, technical and connectivity issues, and inaccurate information. </P>

<P>AI tools such as ChatGPT have already begun transforming HE and will increasingly change how educators teach and students learn. However, it is only the beginning, and several areas still need to be addressed. As AI tools will only get better and more human-like, HE will have to be adapted. Although ChatGPT has the potential to improve teaching and learning for everyone, the key is finding a balance and using this tool to enhance, rather than replace traditional educational practices. Collaboration among educators, researchers, policymakers, and students is essential to ensure that AI is used ethically and responsibly in education. The goal is to create a more equitable and effective education system that provides students with personalised teaching, feedback, and support by addressing the challenges posed by AI technologies and leveraging their benefits. </P>

<P>Every study has limitations, and this study is no exception. The main limitations are related to the number of articles reviewed and the methodology used. However, the findings of research papers from different countries around the world provided meaningful insights into the topic that can assist educators, students, and IHEs in a better understanding of Gen-AI in HE. </P>

<P>Lastly, ChatGPT and similar technologies will have an increasing impact on HE in the future. Therefore, further empirical research is needed to determine the potential benefits of these emerging technologies and how they can be used to the benefit of all. </P>
</Sect>

<Sect>
<H4>References </H4>

<P>Adiguzel, T., Kaya, MH., &amp; Cansu, FK. 2023. Revolutionizing education with AI: Exploring the transformative potential of ChatGPT. Contemporary Educational Technology
<Link> 15(3):1-13. https://doi. </Link>

<Link>org/10.30935/cedtech/13152 </Link>
</P>

<P>Aydın, Ö. &amp; Karaarslan, E. 2022. OpenAI ChatGPT generated literature review: Digital twin in healthcare. In Aydın, Ö. (Ed.): Emerging computer technologies Vol 2, 22-31. İzmir: İzmir Akademi Dernegi. 
<Link>https://dx.doi.org/10.2139/ssrn.4308687 </Link>
</P>

<P>Celik, I. 2023. Towards Intelligent-TPACK: An empirical study on teachers’ professional knowledge to ethically integrate artificial intelligence (AI)-based tools into education. Computers in Human Behavior
<Link> 138:1-12. https://doi.org/10.1016/j.chb.2022.107468 </Link>
</P>

<P>Chan, CKY. 2023. A comprehensive AI policy education framework for university teaching and learning. International Journal of Educational Technology in Higher Education
<Link> 20(38):1-25. https:// </Link>

<Link>doi.org/10.1186/s41239-023-00408-3 </Link>
</P>

<P>Chiu, TK., Moorhouse, BL., Chai, CS., &amp; Ismailov, M. 2023. Teacher support and student motivation to learn with artificial intelligence (AI) based chatbot. Interactive Learning Environments
<Link> 1-17. https://doi. </Link>

<Link>org/10.1080/10494820.2023.2172044 </Link>
</P>

<P>Conijn, R., Kahr, P., &amp; Snijders, C. 2023. The effects of explanations in automated essay scoring systems on student trust and motivation. Journal of Learning Analytics
<Link> 10(1):37-53. https://doi. </Link>

<Link>org/10.18608/jla.2023.7801 </Link>
</P>

<P>Gill, SS., Xu, M., Patros, P., Wu, H., Kaur, R., Kaur, K., Fuller, S., Singh, M., Arora, P., Parlikad, AK., &amp; Stankovski, V. 2024. Transformative effects of ChatGPT on modern education: Emerging era of AI chatbots. Internet of Things and Cyber-Physical Systems 4:19-23. 
<Link>https://doi.org/10.1016/j.iotcps.2023.06.002 </Link>
</P>

<P>Ten Ham-Baloyi, W. &amp; Jordan, P. 2016. Systematic review as a research method </P>

<P>in postgraduate nursing education. Health SA Gesondheid 
<Link>21(0):120-128. http://doi.org/10.1016/j.hsag.2015.08.002 </Link>
</P>

<P>Hu, L. 2022. Generative AI and future. Medium. 17 July 2022. Available 
<Link>at: https://pub.towardsai.net/generative-ai-and-future</Link>

<Link>c3b1695876f2</Link>
. (Accessed on 4 January 2024). </P>

<P>Hutson, J. &amp; Schnellmann, A. 2023. The poetry of prompts: The collaborative role of generative artificial intelligence in the creation of poetry and the anxiety of machine influence. Global Journal of Computer Science and Technology:
<Link> 23(1):1-14. https:// </Link>

<Link>digitalcommons.lindenwood.edu/faculty-research-papers/462/ </Link>
</P>

<P>Kerr, P. 2016. Adaptive learning. ELT Journal
<Link> 70(1):88-93. https://doi. </Link>

<Link>org/10.1093/elt/ccv055 </Link>
</P>

<P>Lim, WM., Gunasekara, A., Pallant, JL., Pallant, JI., &amp; Pechenkina, E. 2023. Generative AI and the future of education: Ragnarök or reformation? A paradoxical perspective from management educators. The International Journal of Management Education 21. 
<Link>100790. 13 pages. https://doi.org/10.1016/j.ijme.2023.100790 </Link>
</P>

<P>Lo, CK., Hew, KF., &amp; Jong, MSY. 2024. The influence of ChatGPT on student engagement: A systematic review and future research agenda. Computers &amp; Education
<Link> 219. 105100. 21 pages. https://doi. </Link>

<Link>org/10.1016/j.compedu.2024.105100 </Link>
</P>

<P>Mhlanga, D. 2023. Digital transformation education, opportunities, and challenges of the application of ChatGPT to emerging economies. Education Research International
<Link> 2023(4):1-13. https://doi. </Link>

<Link>org/10.1155/2023/7605075 </Link>
</P>

<P>
<Link>OpenAI. 2024. ChatGPT (Large language model). Available at: https://chat. </Link>

<Link>openai.com</Link>
. (Accessed on 15 January 2024). </P>

<P>Pesovski, I., Santos, R., Henriques, R., &amp; Trajkovik, V. 2024. Generative AI for customizable learning experiences. Sustainability 16(3034):1
<Link>23. https://doi.org/10.3390/su16073034 </Link>
</P>

<P>Raschka, S. 2023. What are the different approaches for detecting content generated by LLMs such as ChatGPT? And how do they work 
<Link>and differ? Blog. Available at: https://sebastianraschka.com/ </Link>

<Link>blog/2023/detect-ai.html</Link>
. (Accessed on 14 January 2024). </P>

<P>Rasul, T., Nair, S., Kalendra, D., Robin, M., De Oliveira Santini, F., Ladeira, WJ., Sun, M., Day, I., Rather, RA., &amp; Heathcote, L. 2023. The role of ChatGPT in higher education: Benefits, challenges, and future research directions. Journal of Applied Learning and Teaching 
<Link>6(1):41-56. https://doi.org/10.37074/jalt.2023.6.1.29 </Link>
</P>

<P>Rawas, S. 2023. ChatGPT: Empowering lifelong learning in the digital age of higher education. Education and Information Technologies 
<Link>29(6):1-14. https://doi.org/10.1007/s10639-023-12114-8 </Link>
</P>

<P>Rudolph, J., Tan, S., &amp; Tan, S. 2023. ChatGPT: Bullshit spewer or the end of traditional assessments in higher education? Journal of Applied Learning and Teaching 6(1):342-363. 
<Link>https://doi. </Link>

<Link>org/10.37074/jalt.2023.6.1.9 </Link>
</P>

<P>Sok, S. &amp; Heng, K. 2024. Opportunities, challenges, and strategies for using ChatGPT in higher education: A literature review. Journal of Digital Educational Technology
<Link> 4(1):1-11. https://doi.org/10.30935/ </Link>

<Link>jdet/14027 </Link>
</P>

<P>Sullivan, M., Kelly, A., &amp; McLaughlan, P. 2023. ChatGPT in higher education: Considerations for academic integrity and student learning. Journal of Applied Learning and Teaching 6(1):31-40. 
<Link>https://doi.org/10.37074/jalt.2023.6.1.17 </Link>
</P>

<P>Tiwari, CK., Bhat, MA., Khan, ST., Subramaniam, R., &amp; Khan, MAI. 2024. What drives students toward ChatGPT? An investigation of the factors influencing the adoption and usage of ChatGPT. Interactive Technology and Smart Education
<Link> 21(3):333-355. https:// </Link>

<Link>doi.org/10.1108/ITSE-04-2023-0061 </Link>
</P>

<P>Vargas-Murillo, AR., De la Asuncion, INM., &amp; De Jesús Guevara-Soto, F. 2023. Challenges and opportunities of AI-assisted learning: A systematic literature review on the impact of ChatGPT usage in higher education. International Journal of Learning, Teaching and Educational Research
<Link> 22(7):122-135. https://doi.org/10.26803/ </Link>

<Link>ijlter.22.7.7 </Link>
</P>

<P>Vogels, EA. 2021. Some digital divides persist between rural, urban and suburban America. Pew Research Center. 
<Link>Available at: https:// </Link>

<Link>www.pewresearch.org/fact-tank/2021/08/19/some-digital</Link>

<Link>divides-persist-between-rural-urban-and-suburban-america/</Link>
. (Accessed on 12 January 2024). </P>

<P>Wong, WKO. 2024. The sudden disruptive rise of generative artificial intelligence? An evaluation of their impact on higher education and the global workplace. Journal of Open Innovation: Technology, Market, and Complexity
<Link> 10(2):1-8. https://doi.org/10.1016/j. </Link>

<Link>joitmc.2024.100278 </Link>
</P>

<P>Yang, TC., Hwang, GJ., &amp; Yang, SJH. 2013. Development of an adaptive learning system with multiple perspectives based on students’ learning styles and cognitive styles. Journal of Educational Technology &amp; Society 16(4):185-200. </P>

<Link><Figure id="LinkTarget_10761">

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_58.jpg"/>
</Figure>
</Link>
</Sect>
</Sect>
</Sect>

<Sect>
<H2>Chapter 7 </H2>

<Sect>
<H3>Perceiving Generative AI through Educators’ Eyes: Benefits and Challenges </H3>

<P>Geesje van den Berg </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_59.jpg"/>
</Figure>
</Link>

<Link>
<P>Department of Curriculum and Instructional Studies University of South Africa Pretoria, South Africa </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_60.jpg"/>
</Figure>
</Link>

<P>Patience K. Mudau </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_61.jpg"/>
</Figure>
</Link>

<Link>
<P>Department of Curriculum and Instructional Studies University of South Africa Pretoria, South Africa </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_62.jpg"/>
</Figure>
</Link>

<Sect>
<H4>Introduction </H4>

<P>AI (Artificial intelligence) in education offers a wide range of benefits that can enhance the learning experience for students, streamline administrative processes, and support educators. AI refers to advanced computing systems that possess the ability to perform tasks that traditionally require human intelligence. These systems can learn, adapt, correct, synthesise, and use large amounts of text to carry out complex tasks (Popenici &amp; Kerr 2017:2). AI has opened new possibilities and encouraged challenges at all levels of education (Silander &amp; Stigmar 2019:274). Its applications in education are on the rise and have recently received much attention. However, AI is not new and has a long history. The origin of AI can be traced to the 1950s, when John McCarthy arranged a two-month workshop at Dartmouth College in the USA. In the proposal for the workshop, McCarthy was the first person to use the term ‘Artificial Intelligence’ in 1956 (Kühl, Schemmer, Goutier, &amp; Satzger 2022:2237). Gen-AI (Generative AI) refers to technology that uses DL (deep learning) models to generate human-like content, like images and text, based on complex and varied prompts, including languages, instructions, and questions (Lim, Gunasekara, Pallant, Pallant, &amp; Pechenkina 2023:2 of 13). Currently, Gen-AI is at the forefront of leveraging DL models to mimic human-like content. </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_63.jpg"/>
</Figure>
</Link>

<P>Since the release of Gen-AI models such as ChatGPT (chat generative pre-trained transformer), Copilot, and Meta AI in 2022, much has been written about their promises and challenges in education. However, most of the research involved literature reviews (e.g., Bozkurt 2023; Grassini 2023; Baidoo-Anu &amp; Owusu Ansah 2023), while not much empirical research on how educators regard and use these tools has been published. For this reason, this chapter is based on empirical research by investigating the world of Gen-AI as seen through the lens of a specific group of educators. The chapter explores and reflects on Gen-AI in education from the perspectives of those at its heart – the educators. It not only presents their perspectives on the diverse benefits and challenges of Gen-AI but emphasises the necessity for a balanced, ethical, and informed approach to harnessing Gen-AI’s capabilities. The aim is to pave the way for Gen-AI to complement educators and, in so doing, equally benefit educators and students. </P>
</Sect>

<Sect>
<H4>Literature Review </H4>

<Sect>
<H5>Benefits Related to AI in Education </H5>

<P>First, there are several benefits emanating from the use of AI. One of the main benefits is that it can analyse individual student performance and adapt learning materials to address the specific requirements of each student. This personalised approach helps students to learn independently, addressing their strengths and weaknesses. Some AI tools demonstrate the potential for enhancing teaching and learning. Examples include intelligent tutoring systems such as AutoTutor by the University of Memphis and AI Tutor Pro developed by Contact North. These systems can provide personalised feedback to students based on their individual learning styles and paces (Marouf, Al-Dahdooh, Ghali, Mahdi, Abunasser, &amp; Abu-Naser 2024:10). Another technology including personalised learning systems is Pearson’s AI learning tools that can create personalised study plans and tailor educational content to individual student needs. The emergence of adaptive learning technologies has been a focal point in recent research, as these systems can analyse students’ prior knowledge and learning preferences to create customised learning paths (e.g., Akgun &amp; Greenhow 2022:431). AI simulation-based learning technologies are another example, such as IBM Watson in Education, which can develop personal assistants and interactive learning environments that simulate real-world scenarios, aiding in different subjects such as computer science. Real-life scenarios can be simulated, allowing skills practice in a safe environment (Dai &amp; Ke 2022:7). These examples not only have the ability to enhance the learning experience by providing tailored educational pathways, but also prepare students for future challenges through adaptive and engaging learning environments. </P>

<P>Additionally, the provision of lesson content is a crucial aspect of education, where AI plays an important role in improving and supporting this process. AI systems can help to create educational content, including lesson plans, presentations, and learning materials that benefit educators (Cassidy 2023). This can save educators time and ensure that content is diverse, engaging, and aligned with curriculum standards (Garvey &amp; Maskal 2020:291). Furthermore, AI applications can assist students in language learning by providing speech recognition, language translation, and grammar correction. This interactive language support can enhance language acquisition (McCombs &amp; Valenzuela 2020:771; Ai, Hu, &amp; Zhao 2024:1). AI can actively involve students and educators in learning through various interactive and participatory methods. Interactive learning platforms powered by AI engage students in dynamic and stimulating educational experiences. These platforms often include multimedia content, simulations, and interactive exercises encouraging active participation. AI can provide instant feedback on student responses with these platforms, helping them to understand concepts more effectively and correcting misunderstandings promptly. What also comes as an added advantage is that AI not only saves time for educators by automating routine tasks but also improves the accessibility of knowledge and information for a diverse and global student population (Bozkurt 2023:266). Automated assessment systems that use AI to grade student work have been found to reinforce systemic bias and discrimination by favouring dominant ways of thought, knowledge, and language use (Cheuk 2021:831). However, it is important to note that along with the unexpected benefits, challenges cannot be overlooked. The next section discusses the limitations. </P>
</Sect>

<Sect>
<H5>Challenges Related to AI in Education </H5>

<P>Although the benefits of AI are quite exciting, there are several challenges. Some scholars argue that AI systems can inherit biases from the data on which they are trained. This can lead to discriminatory results, as a lack of diversity in the training data can result in skewed predictions that may disproportionately affect certain groups (Chen, Wu, &amp; Wang 2023:5). Furthermore, it could privilege certain types of culture, knowledge, and perspectives above others (Bozkurt 2023:265). This leads to the conclusion that human touch and empathy may be lacking in purely automated interaction systems. Furthermore, AI lacks EI (emotional intelligence), making it difficult to understand or respond appropriately to human emotions. In certain contexts, human emotions play a crucial role that AI cannot fully replace (Kumar &amp; Martin 2023:1 of 20). On the other hand, ethical concerns may arise if AI is used to manipulate or mislead people in translation services, which often struggle with context, idioms, and language nuances, leading to inaccurate translations (Anderson 2024:4). </P>

<P>The integration of advanced AI systems into education continues to gain momentum, with tools such as ChatGPT standing out for their transformative potential. These technologies use natural language processing to bridge gaps in communication and offer tailored support to both educators and students. ChatGPT’s ability to simulate human-like conversations has sparked interest across educational contexts, offering new pathways for interaction and engagement. In the following section we explore the potential uses, risks, misuses, and opportunities for ChatGPT. This serves as a guide to help educators critically assess the impact this tool may have on education. </P>
</Sect>

<Sect>
<H5>An Overview of ChatGPT – its Benefits and Limitations in Education </H5>

<P>This section focuses on ChatGPT, a noteworthy progression in NLP (natural language processing) and LLMs (large language models), which has extensive consequences in various aspects of our lives, including education. ChatGPT was launched in </P>

<P>November 2022 by the research and development company </P>

<P>OpenAI. It is a component of the GPT (generative pre-trained transformer) family of LLMs. Its creation involved a fine-</P>

<P>tuning process that integrated both supervised learning and </P>

<P>reinforcement learning methods (OpenAI 2024). ChatGPT is </P>

<P>designed to possess advanced intelligence, a natural intuition, and the ability to address intrinsic inquiries in a manner that resembles human interaction (OpenAI 2024). Consistent </P>

<P>with the explanation provided, ChatGPT is transforming our </P>

<P>interactions with technology and setting the stage for a new era </P>

<P>of intelligent conversational AI (Ray 2023:138; Rudolph, Tan, &amp; Tan 2023:344: Sullivan et al. 2023:37). This advanced AI chatbot has been trained on a vast amount of online textual information and employs DL methods (Kung, Cheatham, Medenilla, Sillos, De Leon, Elepaño, Madriaga, Aggabao, Diaz-Candido, Maningo, &amp; Tseng 2023; OpenAI 2024). As an advanced chatbot powered by an LLM, ChatGPT is capable of creating text that often </P>

<P>closely resembles human writing. However, as Baidoo-Anu and </P>

<P>Owusu Ansah (2023:58) argue, the use of ChatGPT in education </P>

<P>is substantial but should not be regarded as a replacement for human educators. It should be used as a complementary </P>

<P>tool to enhance the teaching and learning experience and provide additional support. An advantage is that ChatGPT can </P>

<P>support students by generating ideas for research, analysis, assessments, and writing tasks, which could enhance their </P>

<P>learning experiences. </P>

<P>Using ChatGPT and various other LLMs in HE (higher </P>

<P>education) presents both advantages and challenges, as </P>

<P>confirmed by Rasul, Nair, Kalendra, Robin, De Oliveira Santini, </P>

<P>Ladeira, Sun, Day, Rather, &amp; Heathcote (2023:45). It is important to recognise possible disadvantages, including the risks of academic dishonesty, bias, the spread of misinformation, and poor assessment design, which can hamper the development of vital graduate skills and encourage shallow learning (Ray 2023:138; Rudolph et al. 2023:344; Sullivan, Kelly, &amp; McLaughlan 2023:37). Nevertheless, it also presents new challenges and </P>

<P>complexity, such as the risk of cheating and plagiarism, the </P>

<P>ethical and social implications of the use of AI, and the need to adapt curriculum and assessment methods to the new </P>

<P>technology (Tlili, Shehata, Adarkwah, Bozkurt, Hickey, Huang, </P>

<P>&amp; Agyemang 2023:10 of 24). </P>

<P>As a result, educators and students should be cautious and ensure that they use this technology ethically, dependably, and effectively in academic settings. Educators should recognise both the potential and limitations of ChatGPT and use it wisely and responsibly in their teaching. Additionally, the information provided by ChatGPT may seem credible and well-written but lacks insight and may not be necessarily accurate. Sometimes its credibility is questionable, and it is difficult to establish exactly which portions of the information are factually inaccurate. This can harm the learning experience and decision-making skills of students, as they will rely on it rather than contextualising it in their learning content. </P>
</Sect>
</Sect>

<Sect>
<H4>Methodology </H4>

<Sect>
<H5>Design and Methods </H5>

<P>Against the background presented in the literature review, this study aimed to determine how educators experienced the benefits of Gen-AI tools in their respective teaching contexts. Ultimately, the study aimed to provide information that could help educators to integrate Gen-AI tools effectively into their teaching practices, leading to improved learning outcomes. To achieve this, the study adopted an interpretative paradigm which aimed to understand the participants’ perspectives and interpret the meaning they derived from their contexts (Kivunja &amp; Kuyini 2017:33). An interpretative paradigm further seeks to understand real-world phenomena and the subjective world of human experiences, in this case, educators’ perceptions of Gen-AI. </P>

<P>The participants involved in the study had a unique context, as all of them were educators who were actively teaching while simultaneously pursuing a structured Master of Education programme at an open distance learning university in South Africa. This unique combination of roles and circumstances called for an exploratory case study design, thoroughly examining various viewpoints and aspects within a specific context (Yin 2009:17). It is also intended to provide a foundation for further related research. </P>

<P>The research instrument was an online discussion forum for one of the four taught modules of the structured master programme. The forum was hosted in the university’s learning management system, providing an interactive space for students to engage in academic discourse and exchange ideas with the educator and fellow students. Throughout this specific module, students were encouraged to actively participate in a discussion forum after every two study units. The discussion forums consisted of four questions that were specifically designed to test their understanding and application of the knowledge and theory learnt in the study units. Although the discussions were informal in nature, they played a crucial role in determining the year mark of the students. By engaging in these discussions, the students could apply their newfound knowledge to real-life scenarios, which helped them to understand the subject matter better. </P>

<P>During the discussions, students were expected to adhere to certain guidelines. These guidelines included, among others, always demonstrating respect to their educators and fellow students, even if they had different opinions. The students were also required to share their own opinions, avoid plagiarism, and keep their responses between 100 and 250 words. Moreover, students were required to post between eight and 12 responses in total to their educators and fellow students for the four questions over a two-week period. </P>

<P>As part of a study unit on AI in HE, students who were practising educators at the time of the research had to answer related questions. The two questions posted in the discussion forum were the following: </P>

<L>
<LI>
<Lbl>1. </Lbl>

<LBody>Do you use Gen-AI tools in your teaching/training? </LBody>
</LI>

<LI>
<Lbl>2. </Lbl>

<LBody>Share the most important benefits and challenges of using AI tools in your context. </LBody>
</LI>
</L>

<P>The discussion forum was set up to hide the posts of other students until the first response was received. Data analysis was carried out using a thematic data analysis approach, which typically involves six phases (Braun &amp; Clarke 2012:60-69). The first step is to become familiar with the data and generate initial codes that highlight important aspects. The second step involves systematically coding this data. It is essential to break the data down into meaningful chunks and label them with codes that capture significant features. The third phase involves searching for themes that were then reviewed and refined in the fourth phase to ensure accuracy and coherence. The fifth step, according to Braun and Clarke (2012:68), involves identifying and naming the themes to provide a clear and concise representation of the findings. The last step requires producing a report that reflects the insights we gained from the analysis. De Vos, Delport, Fouché, and Strydom (2011:334) suggest that it is possible that existing literature can be used to compare the data with existing knowledge. In this chapter, we follow this approach and use the available literature to draw meaningful insights from our collected data. During the data analysis, as authors and researchers we first identified the codes and themes separately. Thereafter, we discussed similarities and differences to reach a consensus before collaboratively writing up our findings. The process was conducted manually without the use of AI or other technology tools. </P>
</Sect>

<Sect>
<H5>Ethical Considerations </H5>

<P>Research ethics is a crucial aspect of any research study as it protects the rights and interests of every individual involved in the research process. It is essential to obtain all the necessary permissions and ensure that no harm is caused to any participant. According to De Vos et al. (2011:56), research ethics are moral standards that provide guidance on appropriate conduct towards all participants. In this research study, ethical clearance and permission were obtained from the institution where the study was conducted. Additionally, all participants were asked for permission to use their data for research purposes, while they were informed that their names and data would be kept confidential. All participants gave their consent, which was essential in maintaining ethical standards throughout the study. </P>
</Sect>

<Sect>
<H5>Trustworthiness </H5>

<P>Trustworthiness in qualitative research relates to a set of principles that ensure quality, underpinning both rigour in the research process and confidence in the findings (Lincoln &amp; Guba 1985:289). It is not necessary to accomplish this through rigid standards and procedures; rather, it can be done by implementing strategies that enhance the reader’s confidence that the results truly reflect the perspectives of the study participants rather than those of the researchers (Lietz, Langer, &amp; Furman 2006:443). It is imperative for researchers to adopt a trustworthy approach to qualitative research to ensure the validity of the findings. Measures of trustworthiness as described by Lincoln and Guba (1985:289), namely credibility, transferability, dependability, and confirmability, were considered to ensure the trustworthiness of the study. Credibility was established through member checking, while a detailed description of the context and participants ensured transferability. Dependability was maintained by providing a clear account of the research process, and the use of verbatim quotes from participants supported the findings, addressing confirmability. </P>
</Sect>
</Sect>

<Sect>
<H4>Findings and Discussion </H4>

<Sect>
<H5>Context and Participant Information </H5>

<P>At the time of the research, all participants were registered in a structured master’s in education. This programme consisted of four instructional modules and required the completion of a dissertation of limited scope. The specific module used for the purpose of this research was dedicated to exploring the integration of technology in open distance learning. Fourteen students were enrolled in this module. However, one student did not participate in this specific discussion. In addition to this, three students responded negatively to the initial question that asked: Do you use Gen-AI tools in your teaching/training? As our research aimed to determine the responses of participants who actively used AI tools in their instructional activities, the responses of these three students were excluded from our analysis. </P>

<P>To avoid influencing their responses, participants were not made aware of the purpose of the study until after their discussions. They were mature part-time students and practising educators at different levels of education. The diversity of the group made it ideal for gaining an indication of how educators at different levels use AI in their teaching contexts. Interestingly, there was no significant difference between the different levels or genders. Information about the participants is given in Table 7.1. </P>

<Table>
<Caption>
<P>Table 7.1: Participant Information </P>
</Caption>

<TR>
<TH>Participants </TH>

<TH>Gender </TH>

<TH>Teaching role </TH>
</TR>

<TR>
<TD>E1 </TD>

<TD>F </TD>

<TD>Primary school teacher </TD>
</TR>

<TR>
<TD>E2 </TD>

<TD>F </TD>

<TD>Educator in public HE </TD>
</TR>

<TR>
<TD>E3 </TD>

<TD>F </TD>

<TD>Primary school teacher </TD>
</TR>

<TR>
<TD>E4 </TD>

<TD>M </TD>

<TD>Educator in public HE </TD>
</TR>

<TR>
<TD>E5 </TD>

<TD>M </TD>

<TD>Trainer in the public sector </TD>
</TR>

<TR>
<TD>E6 </TD>

<TD>M </TD>

<TD>Educator in the technical and vocational education and training sector </TD>
</TR>

<TR>
<TD>E7 </TD>

<TD>F </TD>

<TD>Educator in private HE </TD>
</TR>

<TR>
<TD>E8 </TD>

<TD>F </TD>

<TD>Educator in private HE </TD>
</TR>

<TR>
<TD>E9 </TD>

<TD>F </TD>

<TD>Educator in public HE </TD>
</TR>

<TR>
<TD>E10 </TD>

<TD>F </TD>

<TD>Educator in public HE </TD>
</TR>
</Table>
</Sect>

<Sect>
<H5>Themes and Sub-Themes </H5>

<P>From the findings, themes and sub-themes emerged. These are presented in Table 7.2 below, followed by a discussion. </P>

<P>Table 7.2: Themes and Sub-themes </P>

<Table>
<TR>
<TD>Themes </TD>

<TD>Sub-themes </TD>
</TR>

<TR>
<TD>Benefits </TD>

<TD>Individualised 24/7 guidance and support. Provision of lesson content. Simplification of language. Active involvement. Immediate feedback. Time-saving. Accessibility of knowledge and information. </TD>
</TR>

<TR>
<TD>Challenges </TD>

<TD>Bias. Decrease in human interaction and the lack of EI. Inaccurate information. Overuse of AI. Plagiarism. Data privacy and security. </TD>
</TR>

<TR>
<TD>Possible solutions </TD>

<TD>Development of AI policies. AI literacy training. Guidelines for responsible AI use. Reduce overreliance on AI. Awareness of AI limitations and biases. Collaboration and peer review. </TD>
</TR>
</Table>
</Sect>

<Sect>
<H5>Benefits </H5>

<P>In their responses on the discussion forum, participants shared insightful feedback on various ways in which AI technology has benefitted or could benefit them in their respective teaching contexts. Their responses highlight the numerous benefits and advantages that AI tools can offer educators. </P>

<P>The first sub-theme refers to the fact that AI tools could offer both educators and their students personalised support on a constant level, which was regarded as one of the major benefits. One participant (E1) stated: </P>

<P>AI can offer individualised guidance by adjusting to each student’s particular learning preferences and speed. Students can get assistance and resources outside of usual lecture hours thanks to the 24/7 availability of AI technologies. Chatbots can instantly provide feedback on tasks and queries, assisting my students in quickly correcting their errors. </P>

<P>This comment highlights the potential advantage of individualised support that is always available to students. This personalised assistance can significantly reduce the burden on educators, allowing them to concentrate on other crucial aspects, such as teaching and social interaction, while simultaneously enhancing the learning outcomes of students (Grassini 2023:6 of 13). The literature also confirms this benefit, which can transform how students acquire knowledge and skills (cf. Akgun &amp; Greenhow 2022; Bozkurt 2023:261). In this regard, participant E2 added: </P>

<P>AI tools are available round the clock, offering students the flexibility to seek help or clarification at any time, which is especially beneficial for those with busy schedules or work commitments outside of class. ChatGPT tailors responses to individual student queries. This adaptability allows students to receive guidance that meets their specific needs, fostering a more student-centred learning environment. </P>

<P>Because Gen-AI tools focus on individual needs rather than following a one-size-fits-all approach, they move the focus away from the educator to the student. They therefore have the potential to help create a learning experience that is more engaging and relevant. Such an approach can empower students to take more control over their learning. However, with these benefits, it is important to use these tools as a complement to traditional teaching and not as a replacement for it (Baidoo-Anu &amp; Owusu Ansah 2023:58), as these tools are not always accurate in their responses. Furthermore, as Bozkurt (2023:267) cautions, educators should always promote the ethical, responsible, and thoughtful use of these Gen-AI tools. </P>

<P>The second sub-theme refers to the potential of Gen-AI tools to provide lesson content. These tools can help to brainstorm ideas for lesson topics, activities, and grade levels (Van den Berg &amp; Du Plessis 2023:8). They have the potential to gather information on a wide range of topics and customise worksheets, activities, and assessment tasks to the specific needs of a class or individual students (Saunders 2023). </P>

<P>In this research, the comments focused on the ability of Gen-AI tools to address students’ individual needs. As examples, the following responses were provided: </P>

<P>Lessons can be designed according to the learners’ individual needs whilst providing feedback and guidance in their lessons </P>

<P>(E4). </P>

<P>AI systems like ChatGPT can be trained to comprehend each student’s unique learning style and preferences, resulting in lesson plans and learning opportunities that are suited to each student’s requirements (E6). </P>

<P>These responses highlight an important and emerging trend in education: The use of Gen-AI tools like ChatGPT, for customisation and enhancing lesson plans. This can be regarded as a significant advancement, making lessons more relevant and engaging for students and transforming the way in which educators teach and students learn (Karpouzis, Pantazatos, Taouki, &amp; Meli 2024:2 of 10). </P>

<P>Although mentioned by only one participant (E2), the third sub-theme is significant because of its transformational potential. Furthermore, the literature has mentioned language-related AI support for students, such as speech recognition, language translation, and grammar correction (e.g., McCombs &amp; Valenzuela 2020). The simplification of language is a crucial aspect of education in various contexts, such as cases where students are taught in a language other than their home language and where students have language challenges. The potential of Gen-AI tools to simplify language is a promising development and may greatly impact students’ comprehension and, ultimately, their outcomes. In their response, participant E2 stated: </P>

<P>Gen-AI tools can make complex concepts more accessible to students, particularly those who may struggle with language comprehension or have diverse linguistic backgrounds. This accessibility can level the playing field and ensure that all students can effectively engage with the course content. I often ask my students to use ChatGPT to simplify content if </P>

<P>they struggle. </P>

<P>Participants raised the fact that Gen-AI tools have the potential for active involvement as another benefit, which led to the fourth sub-theme. The interactive nature between humans and machines with regard to Gen-AI tools has been mentioned by various authors (e.g., Bozkurt 2023:264; Grassini 2023:6 of 13). It holds huge potential and has already started to reshape the educational landscape as it can provide, among others, personalised responses, immediate access to information, and tutoring, which can lead to a more equitable education system that better meets the needs of all students. Participant E9 stated: </P>

<P>AI serves as an interactive tool due to the speed with which it </P>

<P>provides solutions to questions or problems presented to it. </P>

<P>Participant E4 mentioned the motivational nature of the active involvement of Gen-AI tools: </P>

<P>Learners can also be actively involved in their learning by </P>

<P>asking questions to AI and be kept motivated to learn. </P>

<P>This comment confirms that the interactive nature of Gen-AI tools can boost motivation because it makes learning more accessible, engaging, and aligned to the specific needs and interests of students. </P>

<P>The fifth sub-theme was related to the potential of Gen-AI tools to save time. This emerged as a strong sub-theme as it was mentioned by most of the participants. Because of its potential to provide responses within seconds, the benefit has been raised by several authors (e.g., Baidoo-Anu &amp; Owusu Ansah 2023:54; Van den Berg &amp; Du Plessis 2023:5). Participants shared the following: </P>

<P>It saves time and effort when I prepare tasks to be administered </P>

<P>to learners because ChatGPT has information on different topics </P>

<P>and disciplines (E3). </P>

<P>ChatGPT helps me improve my teaching and save time. ChatGPT helps me create content, be creative, and solve problems (E5). </P>

<P>I think that benefits in my context include saving time by using AI tools to create general writing pieces, such as supportive text for teaching, support for students, test questions and quizzes, rubrics, tips, and suggestions (E8). </P>

<P>The responses highlight the significant impact of Gen-AI tools such as ChatGPT in educational settings. The consistent mention of time-saving by the participants indicates a broad recognition of this benefit. Beyond saving time, they mentioned improvements in teaching quality, as it assists them in creating content and fostering creativity and problem-solving. The use of AI tools to generate a wide variety of materials, such as quizzes, rubrics, and tips, shows that these tools are not limited to answering questions but can indeed contribute to various aspects of teaching and learning. Some tasks can be allocated to Gen-AI tools, allowing educators to focus on tasks that these tools cannot perform. </P>

<P>The final sub-theme was about the capability of Gen-AI tools to provide access to knowledge and information. Although aspects of this sub-theme have been mentioned earlier, it was emphasised by most participants and is therefore discussed here. Participants shared the following: </P>

<P>Easy access to information. I do not have to consult many resources or read textbooks with many pages to get the necessary information (E3). </P>

<P>This ChatGPT bot is very intelligent. It can handle multiple requests at once. I love the fact that it can write out codes and analyse data (E7). </P>

<P>The benefits of using AI tools in my context are that they can serve as an information searching tool. It can provide a basis for facilitators and students to conduct further research on any subject. While it is widely acknowledged that simply copying and pasting information from AI tools is unacceptable, the information it provides can be used as a springing board that directs one to possible helpful sources and to other ideas provoked by what AI tools would have picked on the subject in </P>

<P>question (E10). </P>

<P>These responses highlight the ability of Gen-AI tools to sift numerous resources, enabling educators to quickly obtain relevant information from one source. This can be particularly beneficial in educational settings where time and access to information are crucial. Furthermore, ChatGPT’s ability to handle multiple requests simultaneously, including writing codes and analysing data, illustrates how versatile and powerful Gen-AI tools are. However, as participant E10 mentioned, content created by such tools should not be used as a sole source of information. However, it can serve as an initial guide and can direct users to more detailed sources and information. Similarly, Bozkurt (2023:267, 268) highlights the potential of Gen-AI as an advanced educational technology that holds promise for creating knowledge and content, and therefore transforming education. However, he cautions that there may be risks and limitations that require careful analysis and evaluation. </P>

<P>The second theme dealt with challenges when using Gen-AI tools, as indicated by the participants of this study. </P>
</Sect>

<Sect>
<H5>Challenges </H5>

<P>In responding to the discussion forum question about sharing the most important benefits and challenges of using AI tools in one’s context, several challenges were raised. This theme led to several sub-themes, as discussed below. </P>

<P>The first sub-theme related to the responses from participants that Gen-AI tools can provide biased information. This drawback has been emphasised in the literature (Ray 2023; Bozkurt 2023:263; Van den Berg &amp; Du Plessis 2023; Tlili et al. 2023:10 of 24), cautioning that information needs to be approached with thoughtfulness and cross-checked with other sources. Below are examples of participants’ responses: </P>

<P>ChatGPT is an example of an AI model that might inherit </P>

<P>biases from the data it is trained on, thereby propagating </P>

<P>discrimination and preconceptions (E1). </P>

<P>I am aware of ethical concerns regarding the use of AI, including potential biases in the training data that can lead to biased or discriminatory responses (E3). </P>

<P>In this regard, Grassini (2023:3 of 13) confirms that Gen-AI tools use vast amounts of data to learn and generate content. These data may not always be objective, and if the training data include any biases, it can affect the accuracy and fairness of the generated content. Therefore, ensuring that the data used to train AI models are as unbiased and diverse as possible, is crucial. </P>

<P>The second sub-theme was based on the responses related to the challenge of a decrease in human interaction and the lack of EI. In this regard, Baidoo-Anu and Owusu Ansah (2023:56) confirm that Gen-AI models cannot replicate the same level of human interaction as a real educator, which can disadvantage students, especially those needing a personal connection with an educator. Participants’ responses were as follows: </P>

<P>The main concern for using ChatGPT relates to the fact that learners may have a decreased level of engagement and motivation when they interact with the machine as opposed to when they interact with their teachers (E4). </P>

<P>A heavy dependence on AI in the classroom can result in less human interaction, which is crucial for social and emotional growth. Also, AI lacks emotions and empathy, which are critical for offering pupils emotional assistance (E1). </P>

<P>According to Bozkurt (2023:267), the current Gen-AI tools are lacking EI, which can negatively affect their ability to provide personalised support. This gap in EI needs to be addressed as these technologies should not only provide information but should be supportive and empathetic. Therefore, we must work towards bridging this gap to ensure effective interaction of these technologies with students and provide them with the necessary support. </P>

<P>The third sub-theme referred to the provision of inaccurate information. False and inaccurate information is a reality, according to ChatGPT itself. When asked about its possibility of producing false information, it indicated that because of reasons such as training data limitations, a lack of real-time data, misinterpretation of queries, the complexity and ambiguity of language, and its generative nature, it could produce false information (OpenAI 2024). However, Grassini (2023:5 of 13) predicts that false information will probably be mitigated in future as, according to the author, ChatGPT 4 already shows fewer errors compared to the previous version. Nevertheless, it is important to use Gen-AI tools such as ChatGPT critically and to supplement their responses with verification from reliable sources. In this regard, participants stated: </P>

<P>AI-generated responses may not always be accurate or contextually appropriate. Inaccurate information or guidance could mislead students, leading to misunderstandings or incorrect application of concepts (E2). </P>

<P>So far, I’ve discovered that if you don’t know your material or subject, ChatGPT can mislead the facilitator (E5). </P>

<P>One should also check that there is not an overreliance on the tool to create assessments. If you ask ChatGPT to create questions on a certain level of Bloom’s taxonomy and an NQF1 level, it does so, but you still have to check for accuracy and correctness (E8). </P>

<P>According to the findings of Ray (2023:138) and Sullivan et al. (2023:37), it is important to recognise the possible adverse effects, including the dissemination of false information, which may obstruct the cultivation of vital graduate skills and encourage superficial learning. Therefore, it is crucial to take proactive measures to mitigate such risks and ensure that the information provided is accurate. </P>

<P>The fourth sub-theme referred to the overuse of Gen-AI tools. Interestingly, of the 10 participants, eight indicated </P>

<P>NQF (National Qualifications Framework) levels refer to levels of complexity in learning and qualifications. Each NQF level is designed to reflect the relative difficulty, depth, and sophistication of the knowledge, skills, and competencies that a student is expected to demonstrate for a specific qualification. </P>

<P>that an overreliance on technology is a significant challenge in their respective contexts, making it the most crucial challenge in the findings. In addition, Grassini (2023:3 of 13) highlights the potential risk of students becoming overly dependent on Gen-AI tools. However, instead of prohibiting or suppressing the growth of Gen-AI, as some institutions do, Grassini suggests that educators accept and integrate it wisely and effectively in their teaching. Nevertheless, this may require a well-planned, strategic, and balanced approach that prioritises teaching students independence rather than being dependent on these technologies. The following examples serve as participants’ responses: </P>

<P>Overuse of AI tools may make students dependent on technology for learning, impacting their capacity for critical thought and problem-solving. There’s a risk that students may use these tools as a crutch rather than developing independent learning abilities. Some end up being too lazy to search for information (E1). </P>

<P>Significant concerns revolve around the fact that students may simply ask AI tools to complete assessment tasks and submit them for grading without doing meaningful research and work themselves. This then means that they will not learn anything but that AI tools will generate responses for them to the questions that the facilitators would have posed. Linked to the aforementioned is the concern that AI tools may cause students to be passive thinkers who just take information as it is given to them without critically processing it (E10). </P>

<P>The risk of plagiarism when using AI tools in education, as raised by participants, led to the fifth sub-theme. In this regard, authors such as Tlili et al. (2023:10 of 24) confirm that it is easy to copy and paste information provided by Gen-AI tools without referencing the technology, which leads to plagiarism and may breed laziness among students. Grassini (2023:5 of 13) adds that plagiarism has become a significant worry because of the extensive use of digital tools. Using someone else’s work without proper acknowledgement indeed raises serious questions about the academic integrity of the educational system. What makes the situation even more complicated is that, in some cases, AI detection tools struggle to identify cases of plagiarism (Grassini 2023:5 of 13). Adding to the complexity of plagiarism, was a comment from participant E3, as a school teacher: </P>

<P>Plagiarism is a challenge, as I fear that my learners might be using AI tools for copying since they are allowed to bring phones to school. There is no mechanism put in place to check plagiarism on written content. </P>

<P>The above concern implies that students might copy information provided by Gen-AI tools on their mobile phones and write it down as their own. Participant E8 implied that most educators are aware of this challenge: </P>

<P>I think my concern is the same as everyone else’s concerns, of </P>

<P>which the most important one is plagiarism. </P>

<P>The last sub-theme relates to ethics and revolves around the critical issue of data privacy and security. Although only participant E6 raised the challenge, the seriousness of this challenge needed a separate sub-theme and thus a discussion. When ChatGPT was questioned about the origin of the data it provides, it revealed that it extracts data from various sources, including websites, open books, and other educational resources that form its pre-existing knowledge base. Additionally, it uses real-time data, such as the weather or sports events, and incorporates texts it has been trained on, along with information provided by users during ChatGPT conversations (OpenAI 2024). This means that in an educational context, educator and student data are at risk of being misused, used for noneducational purposes, and used without acknowledgement. This calls for an urgent need to address data privacy and security concerns. Confirming this, participant E6 stated: </P>

<P>The usage of AI chatbots raises serious ethical concerns since it </P>

<P>may lead to the gathering and use of student data without the </P>

<P>students’ knowledge or consent. </P>

<P>Although this was not part of the question, some participants offered possible solutions to these concerns, and this led to the third theme in this study. </P>
</Sect>

<Sect>
<H5>Possible Solutions to some of the Concerns </H5>

<P>The first solution provided relates to relevant policies as well as the need for AI literacy training: </P>

<P>Academic institutions must be proactive in their use of AI tools for both facilitators and students. Institutions must harmonise their policies and address AI. First, it needs to be clear whether AI tools are permitted or not. Also, if they are not permitted, it should be communicated to students what will happen if a student is discovered using it, and if it is permitted, what exactly is permissible. Training facilitators and students is imperative on how to utilise AI tools and discussing the benefits and drawbacks of employing AI tools (E5). </P>

<P>The next two possible solutions provided indicated that both these participants were using ChatGPT and encouraged their students to also use it. Both participants implied that they provided guidelines on how their students should use Gen-AI tools in their classes: </P>

<P>Practically, I advise my students that I am one of the first in our institution to use and publish on ChatGPT so they should learn from it and not copy information from it verbatim for me. That puts them on their toes (E9). </P>

<P>I believe we can somewhat prevent overreliance on AI. Various teaching techniques could be employed, including brainstorming sessions, group discussions, and problem-solving activities. This strategy can aid in developing students’ critical thinking and decision-making skills. I like to raise awareness of the biases, mistakes, and restrictions that AI technologies may have. Students are also encouraged to collaborate with each other, participate in peer reviews, and offer criticism. This will foster a sense of connection and engagement that AI systems cannot imitate. This will clarify to students the significance of utilising AI technologies as a </P>

<P>supplement, not a replacement (E8). </P>
</Sect>
</Sect>

<Sect>
<H4>Conclusion </H4>

<P>The purpose of this chapter was to report on students’ perceptions of the benefits and challenges of Gen-AI tools in their respective educational contexts. Based on an interpretative paradigm, we used an exploratory case study design to analyse the responses to questions on the discussion forum of a master’s module in education. The study provides valuable insights from educators who actively engage with Gen-AI tools. The most prominent benefits identified were that these digital tools can provide individualised, 24/7 guidance and support and lesson content. They also shared that these tools can assist with simplifying language, a unique benefit that could not be found in previous studies. Furthermore, Gen-AI tools provide opportunities for active involvement. They save time and make information and knowledge accessible. The participants’ main challenges were that the technologies could provide biased information, lead to decreased human interaction and lack EI. Additionally, they could provide inaccurate information, and there is a risk that students may over-rely on these tools. They further identified the risks of plagiarism and data privacy and security as challenges. Lastly, some participants provided guidelines on how these technologies could be approached and how the risks could be mitigated. These guidelines focused on the need for policies and AI literacy training, setting an example and providing guidelines to students in the use of Gen-AI. An additional important point raised was that Gen-AI tools should complement, and not replace, the human elements of teaching and learning, ensuring a holistic and inclusive educational approach. </P>

<P>While the study provides important insights, it comes with limitations that must be considered. The main limitation of this study is the small number of participants and specific context in which they were studied. It is possible that if a different sample had been used, the findings could have been different. Additionally, the study relied on a single instrument to collect data, which may not have provided a comprehensive understanding of the responses. As a result, more empirical research with the same and different groups and different instruments could lead to a deeper understanding of how Gen-AI tools can be used to benefit all its users equally, ultimately leading to improved learning outcomes. </P>
</Sect>

<Sect>
<H4>References </H4>

<P>Ai, Y., Hu, Y., &amp; Zhao, W. 2024. Interactive learning: Harnessing technology to alleviate foreign language anxiety in foreign literature education. Interactive Learning Environments 2023:1-20. 
<Link>https://doi.org/10.1080/10494820.2024.2409990 </Link>
</P>

<P>Akgun, S. &amp; Greenhow, C. 2022. Artificial intelligence in education: Addressing ethical challenges in K-12 settings. AI and Ethics 
<Link>2(3):431-440. https://doi.org/10.1007/s43681-021-00096-7 </Link>
</P>

<P>Anderson, O. 2024. Ethical considerations in machine translation: Bias, fairness, and accountability. Innovative Computer Sciences Journal 
<Link>10(1):1-7. https://doi.org/10.48047/resmil.v10i1.22 </Link>
</P>

<P>Baidoo-Anu, D. &amp; Owusu Ansah, L. 2023. Education in the era of generative artificial intelligence (AI): Understanding the potential benefits of ChatGPT in promoting teaching and learning. Journal of AI 7(1):52-62. 
<Link>https://doi.org/10.61969/jai.1337500 </Link>
</P>

<P>Bozkurt, A. 2023. Unleashing the potential of generative AI, conversational agents and chatbots in educational praxis: A systematic review and bibliometric analysis of GenAI in education. Open Praxis 
<Link>15(4):261-270. https://doi.org/10.55982/openpraxis.15.4.609 </Link>
</P>

<P>Braun, V. &amp; Clarke, V. 2012. Thematic analysis. In Cooper, H., Camic, PM., Long, DL., Panter, AT., Rindskopf, D., &amp; Sher, KJ. (Eds.): APA handbook of research methods in psychology. Vol. 2: Research designs: Quantitative, qualitative, neuropsychological, and biological, 57-71. 1st ed. Washington DC: American Psychological Association. </P>

<P>Cassidy, C. 2023. Australian universities to return to ‘pen and paper’ exams after students caught using AI to write essays. The Guardian
<Link>. 10 January 2023. Available at: https://www. </Link>

<Link>theguardian.com/australia-news/2023/jan/10/universities-to</Link>

<Link>return-to-pen-and-paper-exams-after-students-caught</Link>

<Link>using-ai-to-write-essays</Link>
. (Accessed on 12 January 2024). </P>

<P>Chen, P., Wu, L., &amp; Wang, L. 2023. AI fairness in data management and analytics: A review on challenges, methodologies and applications. Applied Sciences
<Link> 13(18):1-33. https://doi.org/10.3390/ </Link>

<Link>app131810258 </Link>
</P>

<P>Cheuk, T. 2021. Can AI be racist? Color-evasiveness in the application of machine learning to science assessments. Science Education 
<Link>105(5):825-836. https://doi.org/10.1002/sce.21671 </Link>
</P>

<P>Dai, CP. &amp; Ke, F. 2022. Educational applications of artificial intelligence in simulation-based learning: A systematic mapping review. Computers and Education: Artificial Intelligence 
<Link>3:1-17. https://doi. </Link>

<Link>org/10.1016/j.caeai.2022.100087 </Link>
</P>

<P>De Vos, AS., Delport, CSL., Fouché, C., &amp; Strydom, H. 2011. Research at grass roots: A primer for the social science and human professions. Pretoria: Van Schaik. </P>

<P>Garvey, C. &amp; Maskal, C. 2020. Sentiment analysis of the news media on artificial intelligence does not support claims of negative bias against artificial intelligence. Omics: A Journal of Integrative Biology
<Link> 24(5):286-299. http://doi.org/10.1089/omi.2019.0078 </Link>
</P>

<P>Grassini, S. 2023. Shaping the future of education: Exploring the potential and consequences of AI and ChatGPT in educational settings. Education Sciences
<Link> 13. 692. 13 pages. https://doi.org/10.3390/ </Link>

<Link>educsci13070692 </Link>
</P>

<P>Karpouzis, K., Pantazatos, D., Taouki, J., &amp; Meli, K. 2024. Tailoring education with GenAI: A new horizon in lesson planning. arXiv preprint:
<Link>2403.12071. 10 pages. https://doi.org/10.48550/ </Link>

<Link>arXiv.2403.12071 </Link>
</P>

<P>Kivunja, C. &amp; Kuyini, AB. 2017. Understanding and applying research paradigms in educational contexts. International Journal of Higher Education
<Link> 6(5):26-41. https://doi.org/10.5430/ijhe.v6n5p26 </Link>
</P>

<P>Kumar, H. &amp; Martin, A. 2023. Artificial emotional intelligence: Conventional and deep learning approach. Expert Systems with Applications
<Link> 212. 118651. 20 pages. https://doi.org/10.1016/j. </Link>

<Link>eswa.2022.118651 </Link>
</P>

<P>Kühl, N., Schemmer, M., Goutier, M., &amp; Satzger, G. 2022. Artificial intelligence and machine learning. Electronic Markets 32(4):2235-2244. </P>

<P>Kung, TH., Cheatham, M., Medenilla, A., Sillos, C., De Leon, L., Elepaño, C., Madriaga, M., Aggabao, R., Diaz-Candido, G., Maningo, J., &amp; Tseng, V. 2023. Performance of ChatGPT on USMLE: Potential for AI-assisted medical education using large language models. PLoS Digital Health
<Link> 2(2). 198. 12 pages. https://doi.org/10.1371/journal. </Link>

<Link>pdig.0000198 </Link>
</P>

<P>Lietz, CA., Langer, CL., &amp; Furman, R. 2006. Establishing trustworthiness in qualitative research in social work: Implications from a study regarding spirituality. Qualitative Social Work 5(4):441-458. 
<Link>https://doi.org/10.1177/1473325006070288 </Link>
</P>

<P>Lim, WM., Gunasekara, A., Pallant, JL., Pallant, JI., &amp; Pechenkina, E. 2023. Generative AI and the future of education: Ragnarök or reformation? A paradoxical perspective from management educators. The International Journal of Management Education 
<Link>21(2). 13 pages. https://doi.org/10.1016/j.ijme.2023.100790 </Link>
</P>

<P>Lincoln, YS. &amp; Guba, EG. 1985. Naturalistic inquiry. Newbury Park: Sage. 
<Link>https://doi.org/10.1016/0147-1767(85)90062-8 </Link>
</P>

<P>Marouf, A., Al-Dahdooh, R., Ghali, MJA., Mahdi, AO., Abunasser, BS., &amp; Abu-Naser, SS. 2024. Enhancing education with artificial intelligence: The role of intelligent tutoring systems. International Journal of Engineering and Information Systems 8(8):10-16. </P>

<P>McCombs, M. &amp; Valenzuela, S. 2020. Setting the agenda: Mass media and public opinion. Hoboken: John Wiley. </P>

<P>
<Link>OpenAI. 2024. ChatGPT (large language model). Available at: https://chat. </Link>

<Link>openai.com</Link>
. (Accessed on 5 January 2024). </P>

<P>Popenici, SAD. &amp; Kerr, S. 2017. Exploring the impact of artificial intelligence on teaching and learning in higher education. Research and Practice in Technology Enhanced Learning 1:3-13. 
<Link>https://doi.org/10.1186/s41039-017-0062-8 </Link>
</P>

<P>Rasul, T., Nair, S., Kalendra, D., Robin, M., De Oliveira Santini, F., Ladeira, WJ., Sun, M., Day, I., Rather, RA., &amp; Heathcote, L. 2023. The role of ChatGPT in higher education: Benefits, challenges, and future research directions. Journal of Applied Learning and Teaching 
<Link>6(1):33-57. https://doi.org/10.37074/jalt.2023.6.1.29 </Link>
</P>

<P>Ray, PP. 2023. ChatGPT: A comprehensive review on background, applications, key challenges, bias, ethics, limitations and future scope. Internet of Things and Cyber-Physical Systems 3:121-154. 
<Link>https://doi.org/10.1016/j.iotcps.2023.04.003 </Link>
</P>

<P>Rudolph, J., Tan, S., &amp; Tan, S. 2023. ChatGPT: Bullshit spewer or the end of traditional assessments in higher education? Journal of Applied Learning and Teaching
<Link> 6(1):342-362. https://doi.org/10.37074/ </Link>

<Link>jalt.2023.6.1.9 </Link>
</P>

<P>Saunders, S. 2023. Rather than ban generative AI, universities must learn from the past. University World News. 24 February 2023. 
<Link>Available at: https://www.universityworldnews.com/post. </Link>

<Link>php?story=20230221075136410</Link>
. (Accessed on 12 January 2024). </P>

<P>Silander, C. &amp; Stigmar, M. 2019. Individual growth or institutional development? Ideological perspectives on motives behind Swedish higher education teacher training. Higher Education 
<Link>77:265-281. https://doi.org/10.1007/s10734-018-0272-z </Link>
</P>

<P>Sullivan, M., Kelly, A., &amp; McLaughlan, P. 2023. ChatGPT in higher education: Considerations for Academic Integrity and Student Learning. Journal of Applied Learning and Teaching 6(1):31-40. 
<Link>https://doi.org/10.37074/jalt.2023.6.1.17 </Link>
</P>

<P>Tlili, A., Shehata, B., Adarkwah, MA., Bozkurt, A., Hickey, DT., Huang, R., &amp; Agyemang, B. 2023. What if the devil is my guardian angel: ChatGPT as a case study of using chatbots in education. Smart Learning Environments
<Link> 10(1). 24 pages. https://slejournal. </Link>

<Link>springeropen.com/articles/10.1186/s40561-023-00237-x </Link>
</P>

<P>Van den Berg, G. &amp; Du Plessis, E. 2023. ChatGPT and generative AI: Possibilities for its contribution to lesson planning, critical thinking and openness in teacher education. Education Sciences 
<Link>13:1-12. https://doi.org/10.3390/educsci13100998 </Link>
</P>

<P>Yin, RK. 2009. Case study research: Design and methods. Vol. 5. London: Sage. </P>
</Sect>
</Sect>
</Sect>

<Sect>
<H2 id="LinkTarget_10762">Chapter 8 </H2>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_64.jpg"/>
</Figure>
</Link>

<Sect>
<H3>Artificial Intelligence such as Generative Pretrained Transformer: Reimagining Online Assessment Methods </H3>

<P>Elize C. du Plessis </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_65.jpg"/>
</Figure>
</Link>

<Link>
<P>Department of Curriculum and Instructional Studies University of South Africa Pretoria, South Africa </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_66.jpg"/>
</Figure>
</Link>

<Sect>
<H4>Introduction </H4>

<P>Online assessment techniques have become increasingly popular recently, especially with the rise of COVID-19 (Coronavirus disease of 2019), since they offer practical and effective ways to assess knowledge, skills, and competencies. 2023 will go down in history as the year in which AI (artificial intelligence) – more significantly, LLMs (large language models) – and their brazen poster child, ChatGPT (chat generative pre-trained transformer), have revolutionised everything (Bekker 2024). The introduction of advanced LLMs like ChatGPT, developed by OpenAI, can potentially transform online assessment significantly. However, while there are ongoing discussions about ChatGPT, more practical examples need to be provided to showcase its utilisation in online assessments. It is crucial to shed light on the assessment process, as many students focus solely on passing exams rather than embracing the learning experience throughout the year, leading to a disconnect between the process and the outcome (MacGregor 2023). </P>

<P>Thanks to the swift growth of AI, the current field of education has access to brand-new, cutting-edge solutions. ChatGPT, a LLM that can create text that resembles human language and participate in interactive conversations, is one such technology. Incorporating ChatGPT and other related LLMs presents intriguing opportunities and challenges as online assessment becomes more common. Combining AI and LLMs like ChatGPT introduces a revolution in online assessment. AI-powered applications enable automated grading and individualised feedback and improve efficiency (Celik, Dindar, Muukkonen, &amp; Järvelä 2022:617). However, integrating AI-powered tools raises concerns and challenges about the fairness, validity, and originality of material accessed in online learning environments (Hoyos 2023). For instance, ChatGPT is built on OpenAI ChatGPT 3, which can create narratives, develop computer codes, translate and complete calculations, and autofill images (Rudolph, Tan, &amp; Tan 2023:342). This chapter examines the potential effects of adopting ChatGPT in online assessment by considering its benefits and potential drawbacks for educators and institutions. The chapter also suggests potential answers to address these problems and propose strategies for online assessment. </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_67.jpg"/>
</Figure>
</Link>
</Sect>

<Sect>
<H4>Conceptual Framework </H4>

<P>Understanding ChatGPT and Online Assessment </P>

<P>GPT is a LLM utilised in various NLP (natural language processing) applications, including ChatGPT (Dheda 2023). GPT plays a vital role in ChatGPT by enabling the chatbot to generate responses that closely resemble human language when users interact with it. ChatGPT is a specific variation of GPT 3, an LLM developed by OpenAI (Cotton, Cotton, &amp; Shipway 2023:1 of 12). Since its introduction in November 2022, ChatGPT has attracted much interest from the tech and media sectors. The transformer design is the foundation for GPT 3, which has become widely used in various NLP tasks (Cotton, Cotton, &amp; Shipway 2023:1 of 12). With 175 billion parameters, GPT 3 stands out for its massive size, making it one of the most essential LLMs on the market today (Gpt Generative Pretrained Transformer, Thunström, &amp; Steingrimsson 2022:3 of 7; Sümer 2023:2). It is well known for its ability to perform a wide range of language-related tasks, including translation, summarisation, question answering, and text generation, frequently with little to no specialised training needed for each task. </P>

<P>Since its introduction, GPT 3 has been used in several fields, such as language translation, content creation, and language modelling. It is noteworthy that GPT 3 has proven to be highly accurate at translating across languages and producing cohesive and educational summaries of significant publications (Brown, Mann, Ryder, Subbiah, Kaplan, Dhariwal, Neelakantan, Shyam, Sastry, Askell, Agarwal, Herbert-Voss, Krueger, Henighan, Child, Ramesh, Ziegler, Wu, Winter, Hesse, Chen, Sigler, Litwin, Gray, Chess, Clark, Berner, McCandlish, Radford, Sutskever, &amp; Amodei 2020:7). Furthermore, GPT 3’s ability to comprehend and react to natural language inputs has been demonstrated by the development of chatbots that can talk with people and answer enquiries. The excellent abilities of GPT 3 have drawn much interest and inspired discussions on the prospective applications and effects of AI in language processing (Cotton et al. 2023:1 of 12). </P>

<P>Hsuan-Tien Lin, a professor in the Department of Computer Science and Information Engineering at NTU (the National Taiwan University), argues that when introducing ML (machine learning) to information retrieval or other fields, ‘we need to be careful in understanding what the technique is about to ensure the best use’ (Sharma 2024). Instead, many researchers rush to use ML without comprehensively understanding its techniques and limitations. They may not think clearly about training, validation, and test sets and ‘end up with a rough instead of a rigorous use of machine learning methods’ (Chih-Jen Lin, quoted in Sharma 2024). Sharma (2024) notes that ‘[t]he phenomenon of rough use of machine learning methods is common and sometimes unavoidable. AI moves quickly from research laboratories to real-world applications, yet ML models that work well in the laboratory can fail in real-world use, with essential consequences.’ </P>

<P>It is crucial to understand ChatGPT’s capabilities and constraints to gauge its possible influence on online assessment now that it is built on GPT 3.5 or GPT 4. Section 2 will give an overview of ChatGPT, highlighting its ability to produce contextually appropriate responses and its potential to participate in interactive conversations. It also presents the idea of online assessment, which has grown in acceptance due to its practicality and scalability, underscoring the requirement for efficient and flexible assessment techniques. </P>

<Sect>
<H5>The Benefits of ChatGPT in Online Assessment </H5>

<P>Language models like ChatGPT bring many advantages to online assessment practice. One of the main advantages is automatic grading, which allows for a better analysis of student responses. This feature allows for immediate feedback, thus reducing the workload of teachers (Wong 2023). In addition, ChatGPT can adapt to the needs of individual students, providing a personalised assessment that accommodates different learning styles and abilities (Ahmad 2023). The potential time and cost savings resulting from automated grading can benefit educational institutions. Overall, ChatGPT provides benefits such as quick responses, a personalised assessment experience, and automated grading, helping to improve online assessment practice. </P>

<P>AI-powered language models have great value in supporting asynchronous communication. This ability allows students to participate in discussions and ask questions without the need for real-time interaction, increasing students’ collaboration (Bhosale, Pujari, Pujari, &amp; Multani 2020:227; Li &amp; Xing 2021:186, 187, 209). In addition, ChatGPT can improve communication between students by creating virtual discussion partners, allowing them to work together (Lewis 2022:202). This promotes a sense of teamwork and collaboration among students. In addition, ChatGPT helps distance learning, which is especially useful for students who cannot attend physical classes due to health problems or other restrictions (Barber, Bird, Fleming, Titterington-Giles, Edwards, &amp; Leyland 2021:106). These benefits demonstrate ChatGPT’s ability to improve student engagement, collaboration, and accessibility in educational settings. </P>
</Sect>

<Sect>
<H5>Limitations of ChatGPT and Similar Language Models </H5>

<P>Ensuring the accuracy and validity of the assessment process is very important. However, the integration of ChatGPT and similar language models introduces some challenges and weaknesses that must be addressed. One concern is bias, because the data on which these models are trained may contain inherent biases (Cotton et al. 2023:1 of 12). This raises questions about the validity and impartiality of assessments conducted using AI-based tools. </P>

<P>Another challenge is the need for a more contextual understanding by LLMs like ChatGPT. While they excel at generating human-like text, they may need help to fully comprehend a given assessment task’s nuances and specific context, potentially leading to inaccurate or inappropriate responses (Hoyos 2023). This limitation poses a risk to the validity of assessments that require a deep understanding of complex subject matter. </P>

<P>Furthermore, assessing higher-order thinking skills, such as critical thinking and creativity, can be challenging for AI-powered assessment tools. These skills often require complex reasoning and synthesis of information, which may require more work for current LLMs to accurately evaluate (Cotton et al. 2023:1 of 12). Thus, there is a need for careful consideration and development of appropriate assessment strategies that align with the desired outcomes. </P>

<P>Addressing these challenges and limitations is crucial to ensure the integrity and effectiveness of AI-powered assessments. Strategies such as fine-tuning models, incorporating human oversight, and continuous improvement of the training data can help mitigate biases and enhance contextual understanding. Additionally, exploring alternative assessment methods that capture higher-order thinking skills may be necessary to complement the capabilities of LLMs like ChatGPT. </P>

<P>According to Marr (2023), Chowdhury and Rahman (2023:710), and Karthikeyan (2023:284), ChatGPT has several drawbacks: </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Insufficient common sense: ChatGPT lacks human-level common sense and background knowledge despite its ability to produce human-like responses and have access to a tremendous amount of information. As a result, it could sometimes give illogical or incorrect answers to particular queries or circumstances. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Lack of EI: ChatGPT lacks accurate EI, even though it can provide sympathetic replies. It is challenging to recognise tiny emotional cues or react correctly in emotionally charged circumstances. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Contextual understanding limitations: ChatGPT has trouble comprehending context, especially regarding humour and sarcasm. It may be challenging to understand the subtleties of human communication, even though it is proficient in language processing. Consequently, it may be unable to decipher the intended meaning of amusing or caustic messages, resulting in inappropriate or irrelevant answers. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Having trouble creating long-form, structured content: ChatGPT currently struggles to produce long-form, structured content. Although it may generate grammatically sound and meaningful phrases, producing lengthy content that follows particular formats, patterns, or narratives might be challenging. ChatGPT is therefore better suited to producing succinct content, such as summaries, bullet points, or succinct explanations. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Multitasking limitations: ChatGPT functions best when concentrated on a single job or goal. It is challenging to prioritise activities when given several at once, which lowers accuracy and efficacy. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Potential for biased responses: Because ChatGPT is trained on a sizable dataset that might include biases or prejudices, it may inadvertently produce biased or discriminatory responses. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Limited knowledge: ChatGPT cannot acquire all human knowledge, even if it can access a substantial amount of data. It could have trouble responding to enquiries on highly specialised or narrow subjects, and it might not be current on the latest advancements or modifications in particular domains. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Grammar and accuracy problems: ChatGPT’s sensitivity to misspellings, typos, and grammatical errors is currently restricted. Furthermore, it could generate theoretically valid responses that are inaccurate or irrelevant in the context. This restriction is especially noticeable when handling specialist or sophisticated data where precision and accuracy are essential. It is best to use other methods to confirm the data produced by ChatGPT. </LBody>
</LI>
</L>

<P>Awareness of these limitations is essential to evaluate ChatGPT’s outputs critically and take the necessary steps to verify the information it provides. It is crucial to acknowledge and understand the significant limitations and drawbacks of ChatGPT despite its highly advanced capabilities. AlZu’bi, Mughaid, Quiam, and Samar (2023:3), as well as Hoyos (2023), highlight the following challenges and caveats associated with these models, particularly in an educational setting: </P>

<L>

<LI>
<Lbl>• </Lbl>

<LBody>Reliability and accuracy: ChatGPT is imperfect and may occasionally give inaccurate or deceptive information. Teaching students how to assess and validate their work critically by utilising additional trustworthy sources is essential. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Bias: ChatGPT may produce biased results that reinforce negative stereotypes or prejudices because it is trained on data that may contain biases. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Dependency: Students who rely too much on ChatGPT may find it challenging to analyse critically, solve challenges, and actively participate in class content. They could grow unduly reliant on the model for solutions and responses. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>A lack of creativity: Students may be deterred from applying their creative thinking and problem-solving skills if they use ChatGPT to write assignments or create presentation slides. It might discourage their creativity and prevent them from honing their abilities. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Security and privacy: ChatGPT presents security and privacy issues in the classroom. Sensitive information may be shared or leaked, so the proper security measures must be taken. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Cheating: ChatGPT may be abused for academic dishonesty, including directly copying sample responses or plagiarism. It makes maintaining fair evaluation procedures and academic integrity more difficult. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Technical troubles: ChatGPT may experience technical problems like software compatibility or network connectivity concerns. These technological hiccups could negatively affect the learning environment in the classroom and make it more challenging to integrate the model. </LBody>
</LI>
</L>

<P>Integrating ChatGPT and similar LLMs into online assessments requires careful consideration of the risks associated with security, privacy, and academic integrity. Safeguards should be implemented to address cheating, collusion, and unauthorised access to the models. </P>
</Sect>

<Sect>
<H5>Professional Development and Training for Educators </H5>

<P>It is increasingly difficult for expert educators to teach, especially if we look at online assessment. Educators need appropriate professional development and training to leverage ChatGPT and similar LLMs in online assessment effectively. This section emphasises equipping educators with the necessary skills to effectively understand and use AI-powered assessment tools. Professional development programmes can focus on AI literacy, assessment design, understanding model limitations, and promoting ethical practices in online assessment. </P>

<P>Improved Question-Answering Abilities </P>

<P>ChatGPT and similar LLMs exhibit exceptional skills in answering questions, empowering them to effectively comprehend and address various inquiries. Integrating these models into online assessments can introduce more interactive and dynamic question formats. This would enable students to receive personalised responses and immediate feedback, fostering an enhanced assessment experience that encourages deeper learning (Lin, Cao, Zhang, Chen, Hong, Xu, &amp; Tan 2022). </P>

<P>Personalised Assessments </P>

<P>LLMs such as ChatGPT hold promise in facilitating personalised assessments by adapting the difficulty level and content of questions to align with each student’s performance and learning requirements (Sümer 2023). Through analysing students’ responses, these models can generate suitable follow-up questions, allowing for a more precise assessment of their knowledge and abilities. By employing personalised assessments, a more precise depiction of students’ capabilities can be achieved, along with targeted interventions to facilitate their improvement. </P>

<P>Automated Evaluation and Feedback </P>

<P>One of the notable impacts of LLMs on online assessments is their potential for automated evaluation and feedback. ChatGPT and similar models can analyse students’ responses, detect patterns, and provide immediate feedback on objective and subjective questions. Automated evaluation saves educators time, allowing them to focus on other teaching aspects while providing timely feedback to students and facilitating quicker learning iterations (Messer 2022). </P>

<P>Mitigating Bias and Standardising Assessments </P>

<P>Online assessments often encounter challenges associated with bias and subjectivity in grading. LLMs can assist in addressing these concerns by offering standardised and objective evaluation criteria. Training the models on diverse datasets and adhering to ethical guidelines make it possible to reduce assessment bias and ensure equitable evaluation for all students. However, caution must be exercised to prevent the incorporation of any existing biases present in the training data (Cotton et al. 2023:9 of 12). </P>
</Sect>

<Sect>
<H5>Opportunities/Implications for Educators </H5>

<P>ChatGPT could offer many opportunities for HE (higher education) students and educators. The next sections describe the vital opportunities of ChatGPT for HE (Farrokhnia, Banihashem, Noroozi, &amp; Wals 2023:4-6 of 15). </P>

<P>Increasing Accessibility of Information </P>

<P>ChatGPT can provide instructors and students convenient access to information in various subjects and platforms (such as websites or smartphone apps). Additionally, because it allows for a written response rather than merely a list of sources, it is a more effective tool than conventional search engines. Students can more easily and quickly access fine-grained information thanks to ChatGPT’s ability to locate and summarise pertinent material (Cascella, Montomoli, Bellini, &amp; Bignami 2023:2). From a teaching standpoint, ChatGPT can free up students’ access time so they can read and think critically longer about the assigned material. Educators can find and create suitable teaching materials with the help of ChatGPT. With a set of parameters and restrictions, it can also assist them in creating lesson plans for instruction (Zhai 2022:5 of 18). For instance, based on CAPS (the South African curriculum and assessment policy statement), the author requested that ChatGPT develop a lesson plan for Grade 10 Life Orientation. The following plan, which centres on the subject of Career development and planning, was produced in a matter of seconds: </P>

<P>Grade Level: Grade 10 </P>

<P>Subject: Life Orientation </P>

<P>Topic: Career Development and Planning </P>

<P>Duration: 2 lessons (90 minutes each) </P>

<P>Lesson 1: Exploring Career Options </P>

<P>Objective: By the end of this lesson, students should be able to identify and describe various career options and understand the importance of career planning. </P>
</Sect>

<Sect>
<H5>Materials Needed: </H5>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Career pamphlets, books, or online resources. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Whiteboard and markers. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Computer and projector (optional). </LBody>
</LI>
</L>
</Sect>

<Sect>
<H5>Lesson Structure: </H5>

<P>Introduction (15 minutes): </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Welcome the students and explain the lesson objectives. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Discuss why career planning is essential and how it can impact their future. </LBody>
</LI>
</L>

<P>Activity 1: Career Exploration (30 minutes): </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Divide the class into small groups. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Provide each group with career pamphlets, books, or access to online career resources. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Instruct students to research and identify at least three different careers they find interesting. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Ask each group to prepare a short presentation about these careers, including job descriptions, qualifications, and potential earnings. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Groups present their findings to the class. </LBody>
</LI>
</L>
</Sect>

<Sect>
<H5>Discussion (20 minutes): </H5>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Lead a class discussion on the career options presented. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Encourage students to share their thoughts on which careers appeal to them and why. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Discuss the importance of considering personal interests, skills, and values when choosing a career. </LBody>
</LI>
</L>
</Sect>

<Sect>
<H5>Homework (5 minutes): </H5>

<P>Assign students to write a short reflection on one career they found exciting and why they think it might be a good fit. </P>

<P>Lesson 2: Career Planning and Goal Setting </P>

<P>Objective: By the end of this lesson, students should be able to create a basic career plan and set achievable career goals. </P>
</Sect>

<Sect>
<H5>Materials Needed: </H5>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Whiteboard and markers. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Career planning worksheets (prepared in advance). </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Computer and projector (optional). </LBody>
</LI>
</L>
</Sect>

<Sect>
<H5>Lesson Structure: </H5>

<P>Introduction (10 minutes): </P>

<P>• Review the previous lesson’s key points about career exploration. </P>

<P>Activity 1: Career Planning (40 minutes): </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Distribute career planning worksheets to each student. </LBody>
</LI>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Guide students through the process of creating a basic career plan, which should include: </LBody>
</LI>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Their career goals (short-term and long-term). </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>The steps they need to take to reach those goals. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Necessary qualifications and skills. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Potential challenges and how to overcome them. </LBody>
</LI>
</L>
</L>

<LI>
<Lbl>• </Lbl>

<LBody>Encourage students to think about the importance of education and training in their chosen career paths. </LBody>
</LI>
</L>
</Sect>

<Sect>
<H5>Discussion (20 minutes): </H5>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Ask students to share their career plans with the class. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Discuss the importance of setting SMART (Specific, measurable, achievable, relevant, time-bound) goals. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Talk about the role of perseverance and adaptability in achieving career goals. </LBody>
</LI>
</L>
</Sect>

<Sect>
<H5>Conclusion (5 minutes): </H5>

<P>Summarise the lesson’s key points and emphasise the importance of continuously revisiting and updating their career plans. </P>
</Sect>

<Sect>
<H5>Homework (5 minutes): </H5>

<P>Assign students to refine their career plans based on class discussion and to set specific, measurable short-term goals to work towards in the coming weeks. </P>
</Sect>

<Sect>
<H5>Assessment: </H5>

<P>Assessment for this unit could include reviewing students’ career plans and the goals they have set for themselves, as well as their participation in class discussions and activities. </P>

<P>This lesson plan should help Grade 10 learners to start their journey towards understanding different career options and the importance of planning and setting goals for their future careers. The challenge is for educators to critique the lesson plan from what is to what ought to be. </P>

<P>Facilitating Complex Learning </P>

<P>Previous research has highlighted the potential of AI tools in supporting the development of complex learning tasks, including language learning (Jia, Sun, Ma, &amp; Looi 2022:2, 3 of 18) and critical thinking (Hapsari &amp; Wu 2022:444). ChatGPT, as an ITS (intelligent tutoring system), shares this potential by offering personalised instruction and feedback to students regarding complex tasks such as academic writing skills (Zhai 2022:10 of 18) and programming skills (Biswas 2023:11). Additionally, ChatGPT has demonstrated its ability to stimulate critical thinking in students by presenting them with tailored sets of questions based on their proficiency level (Cotton et al. 2023:2, 5 of 12). </P>

<P>Acting as an intelligent conversational agent, ChatGPT also provides valuable opportunities for students to enhance their argumentation skills through low-stakes practices. For instance, students can present their points of view while asking ChatGPT to take the opposite side, engaging in a debate-like conversation (Bayat, Banihashem, &amp; Noroozi 2022:350). Moreover, like other pre-trained language representation models (cf. Jia, Cui, Xiao, Liu, Rashid, &amp; Gehringer 2021:6), ChatGPT can assist students in evaluating peer assessments, enabling them to enhance their feedback-giving abilities. </P>

<P>Decreasing Teaching Workload </P>

<P>ChatGPT possesses significant potential to reduce educators’ workloads substantially. As an example, it can be utilised as a feedback tool to guide students’ tasks, essays, and assignments (Qadir 2022:1). By engaging ChatGPT, educators can request the formation of diverse test formats, including open-ended questions, multiple-choice questions, or even rubrics for evaluating students’ assignments (Zhai 2022:1, 2, 6 of 18). In the realm of text-based courses, ChatGPT could be employed for the automated grading of assignments, offering a beneficial solution (Cotton et al. 2023:3 of 12). Additionally, educators can efficiently provide students with timely feedback on their essays (Mizumoto &amp; Eguchi 2023:2 of 40). </P>
</Sect>

<Sect>
<H5>Challenges of Online Assessment in Higher Education </H5>

<P>The emergence of ChatGPT and related chatbots offers the industry yet another chance to consider why and how it evaluates – and what HE is for. ChatGPT poses an existential danger if the goal of HE is for students to learn and summarise a body of information, with certification obtained through assessment. If institutions are no longer able to firmly claim that the texts evaluated by scholars were actually written by their students, the market worth of credentials is directly endangered. </P>

<P>Even though ChatGPT and other LLMs have ample potential advantages for evaluation in HE, there are also some significant obstacles that they might present. The potential for plagiarism is one issue when utilising GPT 3 for HE assessments. AI essay-writing programmes are created to produce essays in response to prompts or other conditions. This implies that students might submit writings that are not their own, in this way cheating on their assignments by using these systems (cf. Dehouche 2021:18). This could ultimately result in a devaluation of degrees and undercut HE’s core mission of challenging and educating students. </P>

<P>Another challenge is that some students could use GPT 3 to unfairly advantage themselves over others. For instance, if a student utilises GPT 3 to produce high-quality written assignments, they may have an unfair edge over their peers who lack access to the model. This situation could result in disparities in the evaluation process. </P>

<P>It may be challenging to differentiate between a student’s writing and the output created by a chatbot application. Educators might struggle to effectively evaluate the student’s grasp of the material when they employ a chatbot to answer their questions. This is because the replies generated by the chatbot do not truly reflect the student’s comprehension level (Cotton et al. 2023:3 of 12). </P>
</Sect>
</Sect>

<Sect>
<H4>Discussion and Strategies on the Use of ChatGPT in Online Assessment </H4>

<P>An essential component of HE is assessment, used to gauge students’ learning and development. A vast array of learning outcomes, including knowledge, abilities, and attitudes can be evaluated using a variety of assessment formats, such as exams, papers, projects, and presentations. Developing customised tests is one possible use for GPT 3 in HE. According to each student’s unique requirements and skills, GPT 3 could be used to create tailored tests or quizzes (Barber et al. 2021:24; Zawacki-Richter, Marín, Bond, &amp; Gouverneur 2019:4). GPT 3 could be used to develop questions that are specific to each student’s level of proficiency and that challenge them to exhibit their knowledge and skills, which could be especially helpful in courses that emphasise language skills or critical thinking (Bommasani, Hudson, Adeli, Altman, Arora, Von Arx, Bernstein, Bohg, Bosselut, Brunskill, 
<Link>Brynjolfsson</Link>
, Buch, Card, Castellon, Chatterji, Chen, Creel, Davis, Demszky, Donahue, Doumbouya, Durmus, Ermon, Etchemendy, Ethayarajh, Fei-Fei, Finn, Gale, 
<Link>Gillespie</Link>
, Goel, Goodman, Grossman, Guha, Hashimoto, Henderson, Hewitt, Ho, Hong, Hsu, Huang, Icard, Jain, Jurafsky, Kalluri, Karamcheti, Keeling, Khani, Khattab, Koh, Krass, Krishna, Kuditipudi, Kumar, Ladhak, Lee, Lee, Leskovec, Levent, Li, Li, Ma, Malik, Manning, Mirchandani, Mitchell, Munyikwa, Nair, Narayan, Narayanan, Newman, Nie, Niebles, Nilforoshan, Nyarko, Ogut, Orr, Papadimitriou, Park, Piech, Portelance, Potts, Raghunathan, Reich, Ren, Rong, Roohani, Ruiz, Ryan, Ré, Sadigh, Sagawa, Santhanam, Shih, Srinivasan, Tamkin, Taori, Thomas, Tramèr, 
<Link>Wang, Wang, Wu, Wu, Wu, Xie, Yasunaga, You, Zaharia, Zhang, </Link>
Zhang, Zhang, Zhang, Zheng, Zhou, &amp; Liang 2021:29). </P>

<P>Developing interactive, game-based tests is another possible use for GPT 3 in HE. Chatbots or virtual assistants that push students to solve challenges or respond to enquiries using natural language interaction could be developed using GPT 3. In addition to teaching students useful communication and problem-solving skills, this might be an entertaining and exciting opportunity for them to demonstrate their knowledge and abilities. Students can receive personalised answers to their enquiries and instant feedback from chatbot applications. These chatbots may also personalise the educational process by suggesting books and websites based on students’ interests and requirements. To help students better understand the topic, chatbot applications can also offer educational resources like study guides and lecture notes (Perez, Massey-Allard, Butler, Ives, Bonn, Yee, &amp; Roll 2017:290; Cotton et al. 2023:2, 7 of 12). </P>

<P>A more effective and individualised learning experience could be made possible by using ChatGPT to assess assignments and give students feedback in real-time (Gao 2021:322-324; Cotton et al. 2023:3, 5 of 12; Zawacki-Richter et al. 2019:4). GPT 3, for instance, might be used to grade essays and other written work, freeing up educators to concentrate on higher-level duties like giving students feedback and support. Additionally, GPT 3 could be used to grade tests or quizzes more rapidly and precisely, giving students feedback in real-time (Cotton et al. 2023:2 of 12). </P>

<P>Due to the difficulties in assessing student submissions generated by ChatGPT, there are various strategies that educators can employ to address these issues. As a starting point, instructors can offer comprehensive and explicit guidelines to students for organising their assignments (Barile 2023). They might also utilise straightforward and specific language, reiterate instructions detailing the objective of the task, and include examples. This approach can help to ensure that assignments are composed in a more organised and logical fashion. Additionally, educators can implement a rubric to assess the quality of students’ work. Rubrics establish a clear set of evaluation criteria for measuring performance across various activities, including assignments, presentations, and overall productivity. Rubrics typically include criteria, performance levels, and descriptors (Leadership Workshop 2012). This approach aids in accurately evaluating a student’s effort and comprehension of the subject matter. Additionally, educators are able to apply automated and manual assessment methods to gauge the student’s grasp of the material (Cotton et al. 2023:5 of 12). This ensures that the true extent of the student’s understanding is effectively assessed. </P>

<P>Additionally, academic personnel should search for linguistic abnormalities or patterns: Because chatbots frequently have low language skills, they may produce less human-like content, contain repetitious words or phrases, or use strange or inconsistent terminology. One way to determine whether a chatbot created a piece of writing is to look at its language (Sümer 2023). </P>

<P>Chatbots cannot generate novel concepts or conduct original research. Consequently, it is doubtful that material produced by a chatbot will contain accurate citations or references to sources (Gewirtz 2023). One way to determine if a chatbot wrote a piece of work is to look at its sources and citations. Educators therefore ought to verify originality. Chatbots cannot produce original work. As a result, anything produced by a chatbot is probably going to be rather similar to previously published sources. It is possible to determine whether a chatbot most likely created a piece of writing by using plagiarism detection software or manual examination to check for originality (Gewirtz 2023). </P>

<P>Although LLMs can produce writing that makes sense, they might not always do so. Examining the essay for factual mistakes or discrepancies may reveal that it was written by a machine (Cotton et al. 2023:5 of 12). The educator should also check grammar and spelling: AI-generated writing may be more error-free than human writing, which may contain typos and grammatical faults. However, the quality of the LLM and the input data used to train it can affect this (Sümer 2023). </P>

<P>It is advisable to employ language analysis tools because some of them (like the GPT 2 output detector demo) are made to examine written work and identify patterns or abnormalities that could suggest that a chatbot created the work (Cotton et al. 2023:4 of 12; Sümer 2023). </P>

<P>When creating scenarios for real-world case study-based tests, academic users may also find it very useful, particularly if students utilise ChatGPT to create the unique situation to which their test would react (Saunders 2023). Since the exam is custom-made and student-generated, all of these requirements help to ensure its validity and guard against plagiarism and other academic integrity violations. </P>

<P>A few crucial tactics can be used by university personnel to create tests that discourage or limit students’ use of ChatGPT. One strategy is to design tests that demand students to show their ability to think critically, address challenges, and communicate. For instance, educators could create exams that require students to participate in group debates, presentations, or other interactive activities that require applying their knowledge and skills instead of just asking them to write an essay on a given subject. This would encourage critical thinking and self-directed learning while making it more challenging for students to finish tasks using ChatGPT or other LLMs. </P>

<P>Accurately citing and referencing other people’s work is expected of academic writing, including in-text citations and a list of references after the work. This promotes the legitimacy and dependability of the research and aids in acknowledging the original writers. Because ChatGPT and other LLMs might not have access to the same information sources or could need to be configured to style citations and references correctly, their output might need to contain proper referencing. </P>

<P>Developing open-ended tests and promoting creativity and originality are further tactics. Educators could, for instance, provide tests that require students to formulate their research topics, to formulate and support their positions, or to come up with unique ideas and exercise critical thought. As a result, students may find it more challenging to use ChatGPT to finish their assignments. Lastly, to ensure that students are not using LLMs during assessments, the educator should employ proctored or real-time exams (Cotton et al. 2023:5 of 12). </P>

<P>There are numerous intriguing instances of ChatGPT’s possible applications, fantastic usage, and benefits in the academic setting. Among them are the following: </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Automated grading and feedback: ChatGPT can score test responses, essays, and assignments and give students tailored feedback. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Question-answering tool: By allowing students to utilise ChatGPT to ask questions and get information about course materials, educators can save time by not having to repeatedly respond to the same questions. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Writing and language coaching: Students can enhance their language and writing abilities by using ChatGPT’s writing, grammar, and vocabulary recommendations. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Study aid: By offering explanations and responding to enquiries with remarkable speed and accuracy, ChatGPT can assist students to comprehend and retain information. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Group projects: By assisting students in working together on idea generation, research, planning, and organisation, ChatGPT can support group projects. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Presentation tool: ChatGPT can produce presentation slides and other materials, assisting students in effectively expressing their thoughts. </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>Virtual tutoring: For students who require more assistance, particularly those enrolled in online or distant learning environments, ChatGPT can offer virtual one-on-one tutoring (Hoyos 2023). ChatGPT is a tool that facilitates communication and can be used as a peer in education because learning can take place asynchronously, without people physically meeting in person. </LBody>
</LI>
</L>

<P>In light of these, educators could use ChatGPT in various ways to push students to acquire and hone their personal critical thinking abilities. </P>

<P>First, with questions like ‘How could you apply your past knowledge and abilities to address this new challenge?’ students are encouraged to think critically and solve difficulties by asking follow-up questions like ‘What strategies would you use to overcome any obstacles?’ Second, educators can routinely evaluate students’ strengths and areas for improvement by utilising ChatGPT to build quizzes and assessments that gauge students’ comprehension of the subject matter. Third, ChatGPT may create assignments that are differentiated based on the requirements and skills of each student, giving them progressively more complex assignments to push them. To provide students with high-quality examples, ChatGPT can also generate samples of excellent responses to tasks and assignments (Viljoen 2023). </P>

<P>Students in HE must actively engage with the material, critically reflect on their experiences and perspectives, relate the material to their personal and professional objectives, and synthesise new ideas and information to make meaning from it. Writing about their comprehension, applying what they have learnt to real-world situations, making visual aids or summaries, and discussing the learning material with others are some of the activities. Additionally, students’ perspectives can be widened and their knowledge deepened by actively seeking out and engaging with various viewpoints. </P>

<P>LLMs significantly impact traditional teaching and learning approaches, and this development has led to a shift towards more experiential and student-centred learning approaches. Project-based learning is one example, which pushes students to use their knowledge and abilities to address challenges and complete projects in the real world. As a result, project-based learning gives students a context for their abilities and teaches them how to use them in practical situations. </P>

<P>Collaborative learning is another idea that is accomplished through teamwork and group projects. Students can use their knowledge and learn from one another in a dynamic and encouraging collaborative environment. Through practical and hands-on experiences like field excursions, job placements, and simulations, experiential learning (work-integrated learning) techniques enable students to apply their knowledge in real-world situations. </P>

<P>Finally, since it can be argued that technology can be used as a tool to facilitate learning and problem solving, incorporating technology into the educational process – such as gamification, AI, VR (virtual reality), and AR (augmented reality) – is an additional component in achieving this change (Viljoen 2023). </P>
</Sect>

<Sect>
<H4>Responses from Questions Posted to Online Educators: Strategies for Online Assessment </H4>

<P>The author sent five questions about ChatGPT and online assessment to 10 online educators. The answers can be summarised as follows: </P>

<Sect>
<H5>How can integrating ChatGPT in online assessments enhance the assessment of education and training programmes? </H5>

<P>ChatGPT can provide personalised and interactive student assessment experiences through the chatbot interface. The chatbot interface allows for real-time engagement in which students can demonstrate their knowledge and skills. ChatGPT can also simulate different types of classroom scenarios and can enable students to practise their instructional strategies and classroom management skills. This will provide a realistic and immersive assessment of their readiness to become effective educators. Furthermore, ChatGPT can offer immediate feedback – ChatGPT offers diverse perspectives when giving feedback. It can quickly assist both students and educators with quick prompts. </P>
</Sect>

<Sect>
<H5>What are the potential benefits and drawbacks of using ChatGPT for online assessment in education? </H5>

<P>Using ChatGPT offers the following benefits: First, it can provide immediate and consistent feedback to students and enhance their learning experience. Second, it allows for assessing a wide range of skills, including communication, problem-solving, and critical thinking, which are crucial for effective teaching. Third, it can reduce the administrative burden on educators by automating grading and assessment processes. </P>

<P>The drawbacks may be the potential for biases in AI scoring that may not accurately reflect an educator’s abilities to assess students. Also, relying solely on LLMs for assessment may undermine the importance of human judgement and the contextual understanding educators must apply in real-life classroom settings. Moreover, ChatGPT might not consider the African context or perspective in which this continent’s educators operate. While it can be a valuable resource for fostering critical thinking skills, educators and tutors should be mindful that the chatbot’s knowledge might not fully align with the specific cultural, social, and educational nuances prevalent in South Africa and other African countries. It is crucial for educators and tutors to supplement ChatGPT’s insights with localised and culturally relevant content and discussions, ensuring that educator training programmes are inclusive and attuned to the unique needs and challenges faced by educators in the African context. </P>
</Sect>

<Sect>
<H5>How can ChatGPT effectively assess and evaluate the critical thinking skills of educators teaching students in online settings? </H5>

<P>ChatGPT can provide various scenarios or case studies related to teaching and education. Students can use ChatGPT as a conversational agent to discuss and analyse the provided scenarios or case studies and demonstrate their critical thinking skills. Educators and tutors can use ChatGPT to pose open-ended questions or address challenges for students using essential strategies of thinking. By analysing the responses and interactions using ChatGPT, educators and tutors can assess students’ ability to think critically, analyse information, and provide well-reasoned arguments or solutions. The assessment should have assessment criteria that can be used to evaluate the responses of the student. </P>
</Sect>

<Sect>
<H5>What ethical considerations should educators consider when utilising ChatGPT for online assessment in educator training programmes? </H5>

<P>Educators should be aware of potential biases embedded in AI models, as these biases may unfairly advantage or disadvantage certain students, perpetuating existing inequalities. Ensuring that the AI system is trained on diverse and representative data is crucial to minimise bias. Second, privacy concerns arise when collecting and analysing data from students. Educators must obtain informed consent, protect sensitive information, and establish apparent data retention and usage policies to safeguard privacy. </P>

<P>Moreover, using LLMs may depersonalise the assessment process, potentially diminishing the importance of individualised feedback and human interaction in education. Educators must strike a balance between AI-driven assessment and personalised mentoring to maintain the quality of educator training. Additionally, transparency is vital. Educators should disclose the use of LLMs and their role in assessment to students, fostering trust and ensuring that they understand how their performance is evaluated. </P>

<P>Lastly, educators must continuously monitor and evaluate the LLM’s performance to address emerging ethical concerns and adapt their practices accordingly. Overall, ethical considerations in using ChatGPT for online assessment in educator training programmes require a thoughtful and proactive approach to ensure fairness, privacy, transparency, and the preservation of personalised education. </P>
</Sect>

<Sect>
<H5>How can the use of ChatGPT in online assessment contribute to the professional development of aspiring educators and improve the quality of their training? </H5>

<P>ChatGPT can offer immediate and personalised feedback, enabling students to identify areas for improvement in real time. This continuous feedback loop fosters a more responsive learning process. It can also simulate various classroom scenarios, allowing trainees to practise their teaching skills in a safe and controlled environment. This experiential learning boosts students’ confidence and helps them to develop effective classroom management techniques. Additionally, ChatGPT can assess and guide soft skills such as communication, empathy, and adaptability, which are critical for successful teaching but are often challenging to evaluate objectively. Furthermore, the scalability of online assessment with ChatGPT means that a larger pool of aspiring educators can benefit from high-quality training resources and assessments. </P>

<P>The data collected from these assessments can inform programme improvements, ensuring that educator training is responsive to evolving educational needs. ChatGPT’s integration into online assessment has the potential to transform education, offering more tailored, immersive, and data-driven training experiences that can significantly improve the quality and effectiveness of educator training programmes. </P>
</Sect>
</Sect>

<Sect>
<H4>Conclusion </H4>

<P>Integrating advanced LLMs like ChatGPT into online assessments holds immense potential to revolutionise how we evaluate knowledge and skills. From adaptive assessments to automated grading and feedback, these models can enhance student and educator assessment experiences. However, careful attention must be given to addressing challenges such as bias, reliability, security, and privacy. Collaboration between researchers, educators, and policymakers is necessary to leverage the benefits of LLMs while upholding ethical standards and ensuring the fairness and validity of online assessments. </P>

<P>Instead of marking the conclusion of HE, ChatGPT has offered the sector, and society at large, a valuable opportunity. This is a chance to create innovative and inclusive approaches to teaching, learning, and assessment that correspond with these new understandings. Additionally, many scholars argue that educators should integrate new technologies into the educational experience. Universities and faculty members can and ought to do just that with Gen-AI technology. They should investigate it, identify its limitations, evaluate its potential applications within the relevant disciplines or teaching contexts, and engage in discussions about all these aspects with students, who are likely already familiar with the technology. </P>
</Sect>

<Sect>
<H4>References </H4>

<P>Ahmad, U. 2023. The potential of ChatGPT for personalized learning and adaptive tutoring systems. Available at: 
<Link>https://www.linkedin.com/pulse/potential-chatgpt-personalized-learning-adaptive-tutoring-usman-ahmad/</Link>
. (Accessed on 6 June 2023). </P>

<P>AlZu’bi, S., Mughaid, A., Quiam, F., &amp; Samar H. 2023. Exploring the capabilities and limitations of ChatGPT and alternative big language models. Artificial Intelligence and Applications 20:1-16. 
<Link>https://doi.org/10.47852/bonviewAIA3202820 </Link>
</P>

<P>Bayat, M., Banihashem, SK., &amp; Noroozi, O. 2022. The effects of collaborative reasoning strategies on improving primary school students’ argumentative decision-making skills. The Journal of Educational Research
<Link> 115(6):349-358. https://doi.org/10.1080/00220671.2022.2155602 </Link>
</P>

<P>Barber, M., Bird, L., Fleming, J., Titterington-Giles, E., Edwards, E., &amp; Leyland, C. 2021. Gravity assist: Propelling higher education 
<Link>towards a brighter future. Available at: https://ofslivefs.blob. </Link>

<Link>core.windows.net/files/Gravity%20assist/Gravity-assist-DTL</Link>

<Link>finalforweb.pdf</Link>
. (Accessed on 12 October 2023). </P>

<P>Barile, N. 2023. A guide to giving clear instructions to students (that they will actually follow). Available at: https://www.wgu.edu/ heyteach/article/guide-giving-clear-instructions-studentsthat-they-will-actually-follow2001.html#:~:text=After%20 giving%20them%20directions%2C%20ask%20your%20 students%20to,can%20help%20them%20comprehend%20 and%20complete%20the%20task. (Accessed on 7 June 2023). </P>

<P>Bekker, M. 2024. How academics can counter ‘AI thinks, therefore I am.’ University World News
<Link>. Available at: https://www. </Link>

<Link>universityworldnews.com/post.php?story=20240207185336305</Link>
. (Accessed on 8 February 2024). </P>

<P>Bhosale, SS., Pujari, V., Pujari, V., &amp; Multani, Z. 2020. Advantages and 
<Link>disadvantages of artificial intelligence. Available at: https:// </Link>

<Link>www.researchgate.net/publication/344584269_Advantages_ </Link>

<Link>And_Disadvantages_Of_Artificial_Intellegence</Link>
. (Accessed on 6 June 2023). </P>

<P>Biswas, S. 2023. Role of ChatGPT in computer programming. Mesopotamian Journal of Computer Science 
<Link>2023:8-16. https://doi.org/10.58496/MJCSC/2023/002 </Link>
</P>

<P>
<Link>Bommasani, R., Hudson, DA., Adeli, E., Altman, R., Arora, S., Von Arx, S., </Link>

<Link>Bernstein, MS., Bohg, J., Bosselut, A., Brunskill, E., Brynjolfsson, </Link>

<Link>E., Buch, S., Card, D., Castellon, R., Chatterji, N., Chen, A., Creel, </Link>

<Link>K</Link>

<Link>., Davis, JQ., </Link>

<Link> Demszky, D., Donahue, C., Doumbouya, M., </Link>

<Link>Durmus, E., Ermon, S., Etchemendy, J., Ethayarajh, K., Fei-</Link>

<Link>Fei, L., Finn, C., Gale, T., Gillespie, L., Goel, K., Goodman, N., </Link>

<Link>Grossman, S., Guha, N., Hashimoto, T., Henderson, P., Hewitt, J., </Link>

<Link>Ho, DE., Hong, J., Hsu, K., Huang, J., Icard, T., Jain, S., Jurafsky, </Link>

<Link>D</Link>

<Link>., Kalluri, P., Karamcheti, S., Keeling, G., Khani, F., Khattab, </Link>

<Link>O</Link>

<Link>., Koh, PW., Krass, M., Krishna, R., Kuditipudi, R., Kumar, A., </Link>

<Link>Ladhak, F., Lee, M., Lee, T., Leskovec, J., Levent, I., Li, XL., Li, </Link>

<Link>X</Link>

<Link>., Ma, T., Malik, A., Manning, CD., Mirchandani, S., Mitchell, </Link>

<Link>E</Link>

<Link>., Munyikwa, Z., Nair, S., Narayan, A., Narayanan, D., Newman, </Link>

<Link>B</Link>

<Link>., Nie, A., Niebles, JC., Nilforoshan, H., Nyarko, J., Ogut, G., Orr, </Link>

<Link>L</Link>

<Link>., Papadimitriou, I., Park, JS., Piech, C., Portelance, E., Potts, C., </Link>

<Link>Raghunathan, A., Reich, R., Ren, H., Rong, F., Roohani, Y., Ruiz, </Link>

<Link>C</Link>

<Link>., Ryan, J., Ré, C., Sadigh, D., Sagawa, S., Santhanam, K., Shih, </Link>

<Link>A</Link>

<Link>., Srinivasan, K., Tamkin, A., Taori, R., Thomas, AW., Tramèr, F., </Link>

<Link>Wang, RE., Wang, W., Wu, B., Wu, J., Wu, Y., Xie, SM., Yasunaga, </Link>

<Link>M., You, J., Zaharia, M., Zhang, M., Zhang, T., Zhang, X., Zhang, </Link>

<Link>Y</Link>

<Link>., Zheng, L., Zhou, K., &amp; Liang, P. 2021. On the opportunities </Link>

<Link>and risks of foundation models. Available at: https://arxiv.org/ </Link>

<Link>pdf/2108.07258.pdf</Link>
. (Accessed on 7 June 2023). </P>

<P>Brown, TB., Mann, B., Ryder, N., Subbiah, M., Kaplan, J., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., Agarwal, S., Herbert-Voss, A., Krueger, G., Henighan, T., Child, R., Ramesh, A., Ziegler, DM., Wu, J., Winter, C., Hesse, C., Chen, M., Sigler, E., Litwin, M., Gray, S., Chess, B., Clark, J., Berner, C., McCandlish, S., Radford, A., Sutskever, I., &amp; Amodei, D. 2020. Language 
<Link>models are few-shot learners. Available at: https://arxiv.org/ </Link>

<Link>pdf/2005.14165.pdf</Link>
. (Accessed on 6 June 2023). </P>

<P>Cascella, M., Montomoli, J., Bellini, V., &amp; Bignami, E. 2023. Evaluating the feasibility of ChatGPT in healthcare: An analysis of multiple clinical and research scenarios. Journal of Medical Systems 
<Link>47(33):1-5. https://doi.org/10.1007/s10916-023-01925-4 </Link>
</P>

<P>Celik, I., Dindar, M., Muukkonen, H., &amp; Järvelä, S. 2022. The promises and challenges of artificial intelligence for teachers: A systematic review of research. TechTrends
<Link> 66:616-630. https://doi. </Link>

<Link>org/10.1007/s11528-022-00715-y </Link>
</P>

<P>Cotton, DRE., Cotton, PA., &amp; Shipway, JR. 2023. Chatting and cheating: Ensuring academic integrity in the era of ChatGPT. Innovations in Education and Teaching International
<Link>. 12 pages. https://doi. </Link>

<Link>org/10.35542/osf.io/mrz8h </Link>
</P>

<P>Chowdhury, NA. &amp; Rahman, S. 2023. A brief review of ChatGPT: Limitations, challenges and ethical-social implications. Available 
<Link>at: https://www.researchgate.net/publication/368397881_A_ </Link>

<Link>brief_review_of_ChatGPT_Limitations_Challenges_and_ </Link>

<Link>Ethical-Social_Implications</Link>
. (Accessed on 6 June 2023). </P>

<P>Dehouche, N. 2021. Plagiarism in the age of massive generative pretrained transformers (GPT-3). Ethics in Science and Environmental Politics
<Link> 21:17-23. https://doi.org/10.3354/esep00195 </Link>
</P>

<P>Dheda, G. 2023. What does GPT stand for in Chat GPT: Everything you 
<Link>need to know. Available at: https://openaimaster.com/what</Link>

<Link>does-gpt-stand-for-in-chat-gpt/</Link>
. (Accessed on 6 June 2023). </P>

<P>Farrokhnia, M., Banihashem, SK., Noroozi, O., &amp; Wals, A. 2023. A SWOT analysis of ChatGPT: Implications for educational practice and research. Innovations in Education and Teaching International. 15 
<Link>pages. https://doi.org/10.1080/14703297.2023.2195846 </Link>
</P>

<P>Gao, J. 2021. Exploring the feedback quality of an automated writing evaluation system Pigai. International Journal of Emerging Technologies in Learning 
<Link>16(11):322-330. https://doi.org/10.3991/ </Link>

<Link>ijet.v16i11.19657 </Link>
</P>

<P>Gewirtz, D. 2023. How to make ChatGPT provide sources and citations. 
<Link>Available at: https://www.zdnet.com/article/how-to-make</Link>

<Link>chatgpt-provide-sources-and-citations/</Link>
. (Accessed on 4 October 2023). </P>

<P>Gpt Generative Pretrained Transformer, Thunström, AO., &amp; Steingrimsson, S. 2022. Can GPT-3 write an academic paper on 
<Link>itself, with minimal human input? 7 pages. Available at: https:// </Link>

<Link>hal.archives-ouvertes.fr/hal-03701250/document</Link>
. (Accessed on 11 October 2023). </P>

<P>Hapsari, IP. &amp; Wu, TT. 2022. AI chatbots learning model in English speaking skill: Alleviating speaking anxiety, boosting enjoyment, and fostering critical thinking. In Huang, YM., Cheng, SC., Barroso, J., &amp; Sandnes, FE. (Eds): Innovative technologies and learning, 444-453. ICITL 2022. Lecture Notes in Computer 
<Link>Science, vol 13449. Cham: Springer. https://doi.org/10.1007/978-3-031-15273-3_49 </Link>
</P>

<P>Hoyos, A. 2023. Unpacking ChatGPT: The pros and cons of AI’s hottest 
<Link>language model. Available at: https://www.ie.edu/insights/ </Link>

<Link>articles/unpacking-chatgpt-the-pros-and-cons-of-ais</Link>

<Link>hottest-language-model/</Link>
. (Accessed on 12 October 2023). </P>

<P>Jia, Q., Cui, J., Xiao, Y., Liu, C., Rashid, P., &amp; Gehringer, E. 2021. ALL-INONE: Multi-task learning BERT models for evaluating peer 
<Link>assessments. Available at: https://arxiv.org/pdf/2110.03895.pdf. </Link>
(Accessed on 12 October 2023). </P>

<P>Jia, F., Sun, D., Ma, Q., &amp; Looi, CK. 2022. Developing an AI-based learning system for L2 learners’ authentic and ubiquitous learning in English language. Sustainability
<Link> 14(23). 15527. 18 pages. https:// </Link>

<Link>doi.org/10.3390/su142315527 </Link>
</P>

<P>Karthikeyan, C. 2023. Literature review on pros and cons of ChatGPT implications in education. International Journal of Science and Research 
<Link>12(13):283-291. https://doi.org/10.21275/SR23219122412 </Link>
</P>

<P>Leadership Workshop. 2012. Faculty evaluation rubrics. University of Washington Center for Instructional Change. Available at: https://advance.washington.edu/sites/advance/files/1a.%20 Faculty%20Evaluation%20Rubrics.pdf#:~:text=Rubrics%20 are%20often%20used%20in%20teaching%20to%20 assess,useful%20for%20evaluating%20faculty%20 candidates%20and%20faculty%20performance. (Accessed on 7 June 2023). </P>

<P>Lewis, A. 2022. Multimodal large language models for inclusive collaboration learning tasks. Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: Student Research Workshop, Washington
<Link>, 202-210. Available at: https:// </Link>

<Link>aclanthology.org/2022.naacl-srw.26.pdf</Link>
. (Accessed on 12 October 2023). </P>

<P>Li, C. &amp; Xing, W. 2021. Natural language generation using deep learning to support MOOC learners. International Journal of Artificial Intelligence in Education
<Link> 31(2):186-214. https://doi.org/10.1007/ </Link>

<Link>s40593-020-00235-x </Link>
</P>

<P>Lin, F., Cao, W., Zhang, W., Chen, L., Hong, Y., Xu, T., &amp; Tan, C. 2022. Knowledge-enhanced retrieval: A scheme for question 
<Link>answering. Conference paper. Available at: https://link.springer. </Link>

<Link>com/chapter/10.1007/978-981-19-0713-5_12</Link>
. (Accessed on 12 October 2023). </P>

<P>MacGregor, K. 2023. ChatGPT – calm your inner Luddite, keep your inner sceptic. University World News
<Link>. Available at: https://www. </Link>

<Link>universityworldnews.com/post.php?story=20230302062634124</Link>
. (Accessed on 2 March 2023). </P>

<P>
<Link>Marr, B. 2023. The top 10 limitations of ChatGPT. Available at: https:// </Link>

<Link>www.forbes.com/sites/bernardmarr/2023/03/03/the-top-10</Link>

<Link>limitations-of-chatgpt/</Link>
. (Accessed on 6 June 2023). </P>

<P>Messer, M. 2022. Grading programming assignments with an automated grading and feedback assistant. Conference 
<Link>paper. Available at: https://link.springer.com/ </Link>

<Link>chapter/10.1007/978-3-031-11647-6_6</Link>
. (Accessed on 7 June 2023). </P>

<P>Mizumoto, A. &amp; Eguchi, M. 2023. Exploring the potential of using an AI language model for automated essay scoring. SSRN Electronic Journal
<Link>. 40 pages. https://doi.org/10.2139/ssrn.4373111 </Link>
</P>

<P>Perez, S., Massey-Allard, J., Butler, D., Ives, J., Bonn, D., Yee, N., &amp; Roll, I. 2017. Identifying productive inquiry in virtual labs using sequence mining. In André, E., Baker, R., Hu, X., Rodrigo, MMT., &amp; Du Boulay, B. (Eds.): Artificial intelligence in education, 287-298. Proceedings of the 18th International Conference, AIED 2017. Wuhan, China, 28 June to 1 July 2017. Available at:
<Link>https://files.eric.ed.gov/ </Link>

<Link>fulltext/ED617869.pdf</Link>
. (Accessed on 7 June 2023). </P>

<P>Qadir, J. 2022. Engineering education in the era of ChatGPT: Promise 
<Link>and pitfalls of generative AI for education. Available at: https:// </Link>

<Link>www.researchgate.net/publication/366712815_Engineering_ </Link>

<Link>Education_in_the_Era_of_ChatGPT_Promise_and_Pitfalls_ </Link>

<Link>of_Generative_AI_for_Education</Link>
. (Accessed on 7 June 2023). </P>

<P>Rudolph, J., Tan, S., &amp; Tan, S. 2023. ChatGPT: Bullshit spewer or the end of traditional assessments in higher education? Journal of Applied Learning &amp; Teaching
<Link> 6(1):342-362. https://doi.org/10.37074/ </Link>

<Link>jalt.2023.6.1.9 </Link>
</P>

<P>Saunders, S. 2023. Rather than ban generative AI, universities must learn from the past. University World News
<Link>. Available at: https://www. </Link>

<Link>universityworldnews.com/post.php?story=20230302062634124</Link>
. (Accessed on 6 March 2023). </P>

<P>Sharma, Y. 2024. Generative AI prompts new areas of university 
<Link>research. Available at: https://www.universityworldnews. </Link>

<Link>com/post.php?story=20240129205809349&amp;utm_ </Link>

<Link>source=newsletter&amp;utm_medium=email&amp;utm_ </Link>

<Link>campaign=SPNL5008</Link>
. (Accessed on 3 February 2024). </P>

<P>Sümer, O. 2023. Artificial intelligence chatbots, how do they work? For which purposes can they be used? What are the benefits and 
<Link>potential pitfalls? Available at: https://www.omersumer.com/ </Link>

<Link>personal-writings/2023-03-21-ChatGPT-and-LLMs/index. </Link>

<Link>html</Link>
. (Accessed on 7 June 2023). </P>

<P>Viljoen, J. 2023. AI in higher education – a tool for better learning? University World News
<Link>. Available at: https://www. </Link>

<Link>universityworldnews.com/post.php?story=20230302062634124</Link>
. (Accessed on 2 March 2023). </P>

<P>Wong, C. 2023. Automatic grading with Azure OpenAI Services ChatGPT 
<Link>virtual assistant. Available at: https://techcommunity.microsoft. </Link>

<Link>com/t5/educator-developer-blog/automatic-grading-with-</Link>

<Link>azure-openai-services-chatgpt-virtual/ba-p/3811231</Link>
. (Accessed on 6 June 2023). </P>

<P>Zawacki-Richter, O., Marín, VI., Bond, M., &amp; Gouverneur, F. 2019. Systematic review of research on artificial intelligence applications in higher education – where are the educators? International Journal of Educational Technology in Higher Education 
<Link>16(1):1-27. https://doi.org/10.1186/s41239-019-0171-0 </Link>
</P>

<P>Zhai, X. 2022. ChatGPT user experience: Implications for education. SSRN Electronic Journal
<Link>. 18 pages. Available at: https://ssrn.com/ </Link>

<Link>abstract=4312418</Link>
. 
<Link>http://dx.doi.org/10.2139/ssrn.4312418 </Link>
</P>

<Link><Figure id="LinkTarget_10763">

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_68.jpg"/>
</Figure>
</Link>
</Sect>
</Sect>
</Sect>

<Sect>
<H2>Chapter 9 </H2>

<P>Is ChatGPT Capable of Generating </P>

<Sect>
<H3>Accurate Information? Academics Employ Strategies to Prevent Academic Dishonesty at a Faculty of Education </H3>

<P>Micheal M van Wyk </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_69.jpg"/>
</Figure>
</Link>

<Link>
<P>Department of Education University of South Africa Pretoria, South Africa </P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_70.jpg"/>
</Figure>
</Link>

<Sect>
<H4>Introduction </H4>

<P>AI is not a recent concept, as Isaac Asimov has already made his famous robotic invention based on the Three laws of robotics and the science fiction book, I, Robot (1950). Alan Turing extended AI research in the 1950s, which led UNESCO (United Nations Educational, Scientific, and Cultural Organisation) to </P>

<P>recommend ethical considerations for using AI tools to prevent abuse, fraud, and security risks. Based on the principles of AI </P>

<P>ethics, different AI tools were developed to minimise academic integrity risks. Those early years of exploring AI in HE (higher </P>

<P>education) opened many opportunities and challenges to </P>

<P>the sector. The rapid development of AI research is having a </P>

<P>profound impact on HE. AI solutions have great potential to </P>

<P>speed up progress towards the SDGs (sustainable development </P>

<P>goals) and the Africa Agenda 2063 vision. </P>

<P>Within the past decade, enormous strides have been made related to the effect of AI on education. According to Holmes, Bialik, and Fadel (2023:633), the application of Gen-AI (generative artificial intelligence) tools enhances education </P>

<Link><Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_71.jpg"/>
</Figure>
</Link>

<P>and has grown exponentially. Scholarly works have reported how Gen-AI affects tuition, research, assessment, ethics, and </P>

<P>professional development in the HE sector (Zawacki-Richter, </P>

<P>Marín, Bond, &amp; Gouverneur 2019:23). On the other hand, Nemorin, Vlachidis, Ayerakwa, and Andriotis (2023:39) doubt </P>

<P>the claims about the revolutionary potential of AI in education, stating that they are primarily based on conjecture, speculation, and optimism. Some scholars discuss whether Gen-AI tools can track learning progress in different situations and skills. However, at the time of exploring academic staff’s awareness of Gen-AI conversational tools in teaching and learning (Holmes </P>

<P>et al. 2023:633), there have been studies conducted at IHEs (institutions of higher education) globally (Smolansky, Cram, Raduescu, Zeivots, Huber, &amp; Kizilcec 2023; Peres, Schreier, </P>

<P>Schweidel, &amp; Sorescu 2023) but to a lesser extent in the African context (Baidoo-Anu &amp; Ansah 2023; Mhlanga 2023; Van Wyk, </P>

<P>Adarkwah, &amp; Amponsah 2023:216). In November 2022, a </P>

<P>tech company launched ChatGPT 3.5. This sophisticated AI-</P>

<P>based conversational tool created a global movement with the </P>

<P>characteristics of the famous updated version, ChatGPT 3.5, which the tech company OpenAI developed. This app created a buzz around a Gen-AI tool that can mimic human responses to text prompts. ChatGPT grew faster than any other social media platform and became the fastest-growing app, but similar LLMs (large </P>

<P>language models) emerged, like Claude2 and Llama2, as chatbots </P>

<P>(Rudolph, Tan, &amp; Tan 2023). </P>

<P>ChatGPT’s advances have emphasised the importance of researching it in education. At a South African university, the author studied academic experiences with ChatGPT, a Gen-AI-based conversation tool. Some participants were exposed to and used Gen-AI tools like ChatGPT 3.5 to experiment with teaching and learning activities. This inquiry generated the following research questions: </P>

<L>
<LI>
<Lbl>• </Lbl>

<LBody>Is ChatGPT capable of generating accurate information? </LBody>
</LI>

<LI>
<Lbl>• </Lbl>

<LBody>What strategies do academics use to prevent academic dishonesty at a faculty of education? </LBody>
</LI>
</L>

<P>This study made three significant contributions to Gen-AI-based LLM research. Using Gen-AI in teaching is being studied, and it is contributing to that research. The study expands our understanding of Gen-AI, particularly ChatGPT 3.5 as a chatbot-based LLM in the faculty of education. Third, one should design strategies and create an awareness of detector tools to prevent academic dishonesty. Practical implications included policy revisions for ethics, tuition, and research. There is also a need for continuous professional development in the faculty of education. </P>
</Sect>

<Sect>
<H4>Literature Review </H4>

<Sect>
<H5>AI-based Research’s Impact on the Future of Education </H5>

<P>AI has made significant progress in education because of the pioneering work of Isaac Asimov and Alan Turing. This has led to using AI tools to promote advanced teaching and learning experiences. Launching the first LLM model created a hype around ChatGPT as a Gen-AI-based conversational tool. This led to the so-called chatbot war among tech companies – the infamous California Dollar Rush (Rudolph et al. 2023:370; Griffith &amp; Metz 2023:45). Several tech companies followed OpenAI as an example, focusing on what they could get out of the ‘dollar rush.’ Studies conducted on Gen-AI tools viewed these LLMs as potent technological tools which are tagged as a ‘friend yet as a foe’ (Lim, Gunasekara, Pallant, Pallant, &amp; Pechenkina 2023:13 of 13; Yang 2022:7 of 7). We do not know for sure if ChatGPT and related chatbots have caused a noteworthy change in teaching, learning, research, and ethics. However, more scientific inquiry must be conducted on its benefits and drawbacks. It is argued that Gen-AI is still in the infancy stage of research and development in education. Given the latter, studies are indicating that, for example, ChatGPT 3.5 and Claude2 could act on prompts in a ‘human’ manner but can also identify student learning gaps (Popenici &amp; Kerr 2017:9; Zawacki-Richter et al. 2019:25; Lim et al. 2023:13 of 13). Kaplan-Rakowski, Grotewold, Hartwick, and Papin (2023) have discovered that ChatGPT 3.5, a Gen-AI-powered LLM tool, can create targeted content and assist with writing tasks for educators and students. Studies have indicated that Gen-AI is very beneficial and has become very popular in education (Holmes et al. 2023:634). </P>
</Sect>

<Sect>
<H5>Academic Dishonesty and Cheating with Gen-AI-Based Tools </H5>

<P>The emergence of ChatGPT has created a hype about chatbot popularity as a ‘hot tech topic’ (Leaver &amp; Srdarov 2023:2 of 11; Taecharungroj 2023; Van Wyk et al. 2023). Several African studies have emerged on awareness of the Gen-AI tool that was created, and most academics and students have started using ChatGPT in teaching and learning (Baidoo-Anu &amp; Ansah 2023; Mhlanga 2023; Van Wyk et al. 2023:216). Faculties of education have witnessed exponential promise due to Gen-AI-based tools, which have been implemented in contact, blended, and ODL (open distance learning) contexts. It could be deduced that Gen-AI tools have brought a pedagogical shift in HE. Studies conducted in these contexts found that these tools can enhance student learning and teaching opportunities. For example, Mohamed’s study reveals how educators view the efficacy of ChatGPT in teaching EFL (English foreign language) students (Mohamed 2024:3215). Research reports the advantages of using Gen-AI tools in fostering a student-centredness approach (Luan et al. 2020:9 of 11; Huang et al. 2022:238; Antonenko &amp; Abramowitz 2023:69). When Gen-AI tools are applied in this approach, personalised learning experiences are supported and enhanced (Abawi 2015:49; Chan &amp; Hu 2023:18 of 18; Li &amp; Wong 2023:423; Lodge, Thompson &amp; Corrin 2023:8). It also promotes students’ cognitive and emotional needs (Baek &amp; Kim 2023). Cope, Kalantzis, and Searsmith (2021:1231) have found that educators utilised creative techniques to assess student work in various subjects. </P>
</Sect>

<Sect>
<H5>Ethical Implications of Using Gen-AI Tools in Teaching and Learning </H5>

<P>As Gen-AI tools are accessible to academics and students, academic dishonesty, cheating, plagiarism, and copyright infringement have ethical consequences for education. IHEs are updating their policies on tuition, research, and assessments to maintain academic honesty, according to recent studies. Cooper’s study reveals that using copyrighted material in science education without permission can have a negative impact on academic integrity (Cooper 2023:447). To curb this challenge, academics must be empowered to use Gen-AI tools to raise awareness and set an example for students. Studies have already reported possible remedies for academics to model and act responsibly on the use of Gen-AI tools in their praxis (Ali 2023:5; Huallpa 2023:112; Dwivedi, Kshetri, Hughes, Slade, Jeyaraj, Kar, Baabdullah, Koohang, Raghavan, Ahuja, &amp; Albanna 2023:63 of 63; Cotton, Cotton, &amp; Shipway 2023:10; Kelly, Sullivan, &amp; Strampel 2023:14 of 16). </P>
</Sect>

<Sect>
<H5>Strategies to Prevent Academic Cheating and Plagiarism </H5>

<P>Some scholars argue that to protect the image and integrity of IHEs, specific measures must be developed to prevent cheating. This author believes that ethical considerations must be applied consistently and, if needed, policies related to integrity, copyright infringements, and praxis of academics must be revised. Given the latter, IHEs should implement strategies to prevent academic dishonesty. Literature provides measures and strategies that can be considered to prevent academic dishonesty of Gen-AI tools in teaching and learning. A study by Oravec (2023:215) found that specific cheating-detection strategies and AI-empowered skills could be used to support students in learning to deal with possible academic dishonesty or plagiarism. To prevent cheating, Sallam (2023:17 of 20), as well as Firaina and Sulisworo (2023:41) suggest that educators should present group projects, problem-based assignments, and case studies. In these assignments, students can present their findings together by writing and reporting. </P>

<P>There are other strategies suggested, like group discussions (Castillo, Silva, Arocutipa, Berrios, Rodriguez, Reyes, Lopez, Herbert, Rivera, &amp; Arias-Gonzáles 2023:11). Several studies have reported soft skills for students to succeed and found employability in an AI-dominated workplace (Reilly 2018:52; Baidoo-Anu &amp; Ansah 2023:57; Mukherjee 2023:124; Toli 2023:79). Scholars argue that soft skills such as social interaction skills (emotional self-regulation, collaborating with teams under pressure), creativity, curiosity (innate to explore, seek information to solve the problem, challenge assumptions), and critical thinking skills apply to AI generated information, logical reasoning, and reflective thinking (Chaudhry &amp; Kazim 2021:14; Mukherjee 2023:124; Toli 2023:79). Universities subscribed to several plagiarism detection software such as Turnitin and SafeAssign. Additionally, other GPT detectors such as GPTZero, ZeroGPT, GPT 2 output detector, CheckGPT, and Writefull are designed to combat outsourcing assessment and research tasks (Liu, Yao, Li, &amp; Luo 2023:23 of 23; Perkins 2023:14). However, with new LLMs such as ChatGPT, it is difficult for inexperienced faculty members and researchers to detect GPT-generated texts (Liu et al. 2023:22 of 23). </P>

<P>Many IHEs have recently employed plagiarism detection tools to promote novelty in writing and detect novel or unexpected ways of cheating or plagiarising (Canzonetta 2021:1). Before the launch of LLMs like ChatGPT 3.5, prior studies have revealed that anti-plagiarism software such as Turnitin and SafeAssign was used and reported as efficient in deterring plagiarism acts (Ledwith &amp; Risquez 2008:374; Canzonetta 2021:1; Perkins 2023:13; Shang 2019:112). </P>
</Sect>
</Sect>

<Sect>
<H4>Methodology </H4>

<P>Since universities became acquainted with ChatGPT 3.5, they started with critical conversations about Gen-AI in education. For example, throughout 2023, at this mentioned African university, academics were introduced to online webinars, face-to-face seminars, college discussion forums, and virtual conferences, centred around Gen-AI tools, in particular ChatGPT 3.5 in HE. In one of the virtual Microsoft Teams videoconferencing sessions on ChatGPT 3.5 and detectors, an invitation was sent to academics to participate in an awareness study related to Gen-AI conversational tools. Based on this exposure, the participants agreed and signed a consent application to accept being a part of the study. Based on the notion of phenomenology, African scholars’ phenomenology is the science of experiences-as-lived and both a theory and a method used to explore academic awareness and experiences with Gen-AI tools by academics (Koopman &amp; Koopman 2018:16; Van Wyk 2022:124). Various research methods have been used to measure Gen-AI tools, but the chosen method was IPA (interpretative phenomenology analysis) for this study. Smith and Osborn (2008:67) argue that an IPA approach is conducted to create a narrative account for each participant through verbatim statements, views, and beliefs. For this study, the IPA is appropriate for exploring four academics’ lived experiences and how specific strategies to prevent cheating and academic dishonesty were applied in teaching and learning. The purposive sampling comprised academics. Pseudonyms were used, such as lecturers, senior lecturers, associate professors, and full professors, to protect the identity of participants. Dates and times were scheduled on Microsoft Teams to conduct the interviews. The interviews conducted on Microsoft Teams were transcribed. We then downloaded and secured the recordings as part of data management. An online link was sent to participants to verify the accuracy of recordings, transcripts, and questions. 
After a week, if participants agreed, an e-mail (no names to protect identification, codes as 
<Link>12345@unisa.ac.za</Link>
) was sent to the study’s author. The participants stated in their e-mailed responses that they agreed that the recordings, data generated, and transcripts were a true reflection. The e-mail served as proof of their participant validation and consent. </P>

<P>The data exploitation approach for conducting the IPA approach as provided by Hycner (1999) indicates how the simplified ‘explicitation’ strategy as an ‘investigation of constituents of the phenomenon while keeping the context of the whole’ (Hycner 1999:161) is used. The extracts were analysed and themes were identified as guided by the IPA analysis process (Hycner 1999:148; Creswell 2013:slide 23; Nowell, Norris, White, &amp; Moules 2017). The ethical clearance for the study was granted before it started (reference 2020/08/12/90178912/19/AM). </P>
</Sect>

<Sect>
<H4>Findings </H4>

<P>The results confirm that researchers conducted minimal research on ChatGPT as an AI conversational tool. Academics were aware of the potential benefits and drawbacks of ChatGPT for teaching and learning. Strategies were developed to prevent academic cheating in AI-based learning. </P>

<Sect>
<H5>Create an Awareness of Gen-AI Detector Tools </H5>

<P>After the launch of ChatGPT 3.5, students and academics agreed it is effortless to cheat or plagiarise with LLMs like ChatGPT or Bard. IHEs are compelled to empower staff and students to utilise Gen-AI tools for plagiarism detection, designed explicitly for AI-generated text. Participant 3 stated, ‘I informed my postgraduate students about the drawbacks of ChatGPT, but I exposed them to GPTZero as a Gen-AI detector software.’ </P>

<P>Universities introduced quality assurance practices and ethical considerations for postgraduate students to submit their chapters through Turnitin as a QA (quality assurance) mechanism to prevent academic cheating or plagiarism. Participant 4 narrated: ‘My doctoral students used Turnitin software, but the Gen-AI detector identifies whether part of the text is generated by any Gen-AI software.’ </P>
</Sect>

<Sect>
<H5>Designing Active and Participative Authentic Learning and Assessment Tasks </H5>

<P>Participants believed that measures need to be taken to prevent academic cheating. They viewed the purpose of using chatbots to generate content for students to be critically evaluated and reflected on. Some asserted that educators must adopt strategies to avoid cheating or academic dishonesty. They proposed creating case studies and group projects for students to report on. Participant 1 claimed: ‘Problem-based learning tasks are used by this person to prevent cheating, where students reflect, solve, and evaluate each task either individually or in a group. The associate professor sets criteria for grading tasks performed by one group, and evaluated by another. A professor created projects for students to do instead of taking a regular test.’ </P>
</Sect>

<Sect>
<H5>Exposing Students to Gen-AI Detection Tools </H5>

<P>Participants thought educating students about detector tools would discourage cheating and promote good behaviour. One participant (Participant 2) maintained that he exposed and empowered his students to use detector tools to combat cheating in his course. Participant 2 (female senior lecturer) asserted: ‘Since </P>

<P>ChatGPT was launched, I have exposed my students to this generative tool. This was my way to create an awareness that detection tools will be used as a preventive measure to discourage cheating.’ Participant 3 (a professor) echoed the same sentiment: ‘Several communications (social media platforms) were posted or sent by internal e-mail to staff and students that using generative software is not allowed. This is a measure to prevent academic dishonesty.’ </P>
</Sect>

<Sect>
<H5>Advocating Gen-AI-Chatbot Strategies </H5>

<P>Currently, different detector tools are available to prevent plagiarism. For some participants, advocating strategies to prevent cheating is the best way. They believe exposing students to detector software is vital to discourage cheating. Participant 4 (associate professor) narrated: ‘Before students submit an assessment task, a Gen-AI declaration is assigned. I explained the similarity in the text of each work. Ignorance is not an excuse, and cheating in my course can lead to my being deregistered.’ </P>

<P>However, some participants were positive and stated that embracing these generative tools is currently an excellent opportunity. Ethics and collective responsibility are essential for promoting chatbots in education. Participant 5 mentioned: ‘Inculcate a sense of ethical and collective responsibility among our students. I like the critical conversations about ethics and zero-tolerance of fraud using Gen-AI tools. I am of the view to protect our institution’s image, qualifications, and values.’ </P>
</Sect>

<Sect>
<H5>Create Meaningful and Authentic Learning Opportunities </H5>

<P>Participants opined that to prevent cheating among students, an awareness, adoption, and design of authentic learning experiences will ultimately change the usefulness of chatbots. Webinars on Gen-AI conversational tools are valuable strategies to increase awareness. We should create discussion forums and online panels to have critical conversations about detector software and inculcate a sense of the adoption of detector tools in teaching and learning. Participant 6 (associate professor) echoed: ‘Our college hosted several webinars on ChatGPT and other generative tools. To prevent cheating, assign research projects requiring students </P>

<P>to create their own data collection tools or write case studies requiring human problem-solving skills.’ </P>
</Sect>

<Sect>
<H5>Teaching Students that ChatGPT is Viewed as Truth or Fake </H5>

<P>Participants stated that they seldom heard conversations about the ethics of education and the importance of moral compasses. Some participants felt that technologies come with extra responsibilities and that ethics should be taught in every discipline. Another vital skill students should learn for future employability is asking better questions. Participant 7 (senior lecturer) discussed the importance ‘of avoiding inaccurate information from language models like ChatGPT in creating authentic learning opportunities. Can ChatGPT generate accurate content, or how far can we trust the information to be true? We must teach ourselves and the students that we graduate to do that.’ </P>
</Sect>

<Sect>
<H5>Empowering Students with 21st-Century Soft Skills in an AI-Dominated World for Future Employability </H5>

<P>The exponential growth in Gen-AI in education has changed HE’s strategic and operational objectives. Students must be empowered with soft skills to succeed in an AI-dominated workplace. These soft skills are social interaction skills (emotional self-regulation, collaborating with teams under pressure), creativity, curiosity (innate to explore, seek information to solve the problem, and challenge assumptions), and critical thinking skills (applied to AI-generated information, logical reasoning and reflective thinking). Participant 5 argued that ‘soft skills are important to prepare students for future employability. They believe that universities must rethink and reimage curricula and relevant academic programmes to equip students with problem-solving, critical thinking, emotional intelligence, communication, interpersonal relations, and technical skills to be relevant for employability in an AI-dominated landscape.’ </P>

<P>In the past decade, an increase in rapidly evolving AI and technological advancements has changed the requirements for the workplace. For IHEs, businesses, and governments to be relevant in these rapid changes in AI-dominated spaces, it becomes imperative to rethink the relevance of existing curricula. </P>

<P>As these AI-dominated tasks have changed the current and futuristic workplace, unique tasks done by humans have become obsolete. Participant 2 provided an example: ‘In one of my modules [design-based research], which focuses on academic writing and critical thinking, I used ChatGPT 3.5 to generate a 600-word text on a contemporary topic. For this activity, students must identify grammatical errors, in-text referencing, correctness of citations, analyse paraphrases of the text and present individually to the group. The group responded to each student’s presentation.’ </P>
</Sect>

<Sect>
<H5>Create Alternative Assessment Strategies for Meaningful and Authentic Learning Opportunities </H5>

<P>Academics revealed planned creative writing tasks. According to Participant 4, ‘it is vital to empower students in creative writing skills where students are asked to draw career timeline mapping and present each personal story on the career timeline mapping to the class.’ These assignments require original thought and personal expression and cannot be easily generated by an AI platform. Another authentic learning opportunity is the case study as an alternative strategy. Participant 7 claimed that as part of the creative writing skills course, students must apply learned skills and knowledge in real-world issues: ‘Students are instructed to plan and write a case study about any local community issue like sewerage as a serious environmental issue – ChatGPT generated text is not allowed.’ </P>
</Sect>
</Sect>

<Sect>
<H4>Discussion of Findings </H4>

<P>In this study, the participants were of the view that Gen-AI tools could be an advantage in teaching and learning. Chatbots support students with case studies, problem-solving, project-based learning, and academic writing using AI-conversational tools (Kaplan-Rakowski et al. 2023:30). Participants echoed sentiments of the usefulness of Gen-AI tools in promoting advancing teaching and learning experiences (Megahed, Chen, Ferris, Knoth, &amp; Jones-Farmer 2023:27 of 30; Castillo et al. 2023:11). According to Participant 8, ‘ChatGPT was very useful in developing academic writing skills for postgraduate students.’ Another issue that emerged from the interviews is that the disruptive Gen-AI conversational tools will likely change the face of HE immensely. Mohamed (2024:3208) reports that the lecturers viewed ChatGPT as efficient in teaching English Foreign Language students. Students were concerned about the cost and availability of AI tools for those who are disadvantaged, as companies were investing in them for profit. </P>

<P>Participants claimed that it is vital to be aware of and consider ethics when using AI chatbots. They felt academics must be cognisant of the speed of the emergence of new Gen-AI tools since the launch of ChatGPT 3.0 in 2022. Studies concur that the ethics of using Gen-AI tools by students is a major issue that needs to be addressed (Baek &amp; Kim 2023; Eke 2023:4 of 4; Cotton et al. 2023:12). Certain studies are expressing the fear of ethical implications, as cheating, plagiarism, and copyright infringement have ethical implications for education (Cooper 2023:449). The participants concurred that ethics is at the heart of preventing and detecting cheating or academic dishonesty. There were already debates about ethical issues like academic dishonesty and cheating due to using Gen-AI tools. There are AI-detection software to detect academic dishonesty, but these are expensive for HE (Oravec 2023:213). </P>

<P>IHEs must update policies and educate staff and students to prevent cheating with Gen-AI tools (Dwivedi et al. 2023:61 of 63). According to the participants, these are critical strategies against academic dishonesty. </P>

<P>Certain measures are proposed to prevent academic cheating using AI in teaching and learning. Participants believed it necessary to prevent academic dishonesty and adopted measures to combat fraud and cheating. Students are allowed to present case studies, research, and problem-based projects as a group and report the results (Firaina &amp; Sulisworo 2023:39). Participant 9 proposed an ‘alternative assessment by creating project-based learning tasks for students to plan, implement, and present their final projects.’ Studies advocate the use of AI chatbot strategies to prevent academic dishonesty (Ali 2023:4; Cotton et al. 2023:12). Several participants believed it is vital to expose students to detector software as a way to discourage cheating. Participants opined that to prevent cheating among students, awareness, adoption, and design of authentic learning experiences will ultimately change the usefulness of chatbots. Webinars on Gen-AI conversational tools are a valuable strategy to increase awareness. According to Castillo et al. (2023:10), discussion forums can be used as an effective, active method to create critical conversations about Gen-AI detector software to inculcate a sense of student adoption of detector tools. </P>

<P>Gen-AI tools have grown exponentially worldwide, so education has had to change its teaching and learning. According to Castillo et al. (2023:16), ChatGPT had a significant impact on student learning. Scholars posit that soft skills like social interaction, creativity, curiosity, and reflective thinking are vital for futuristic workforce placement. Studies reported that students are empowered with soft skills to succeed in an AI-dominated workplace (Baidoo-Anu &amp; Ansah 2023:57; Mukherjee 2023:124; Reilly 2018:52; Toli 2023:79). Participant 5 argued that ‘soft skills are important to prepare students for future employability. Universities must rethink and reimage the relevant academic programmes to equip students with problem-solving, critical thinking, emotional intelligence, communication, interpersonal relations, and technical skills to be relevant for employability in an AI-dominated landscape.’ </P>
</Sect>

<Sect>
<H4>Conclusion </H4>

<P>Since the launch of the first LLM, several scholarly publications have reported benefits, but some scholars have raised concerns about ethics and morality. These LLMs have brought to light a significant pedagogical change in HE. This exploratory study revealed that academic staff created an awareness of the ethics of ChatGPT. Some participants raised a concern about whether ChatGPT is capable of generating accurate information because the world is full of inaccurate knowledge due to many chatbots. Participants believed that specific strategies could prevent academic dishonesty and cheating. The author of this study has suggested some strategies to prevent cheating with Gen-AI tools. Academics must redesign authentic context-based assessment tasks, such as specific case studies, problem-solving activities, and project-based learning tasks, and academic writing skills. </P>

<P>As suggested by participants, these strategies create hands-on learning tasks, introduce students to Gen-AI detection tools, and create authentic learning opportunities. The participants suggested that education faculties need to update AI policies, provide guidelines and educate students about AI literacy skills and AI detector tools for the ethical use of Gen-AI tools in teaching and learning. Furthermore, sentiments revealed that education must rethink and reimagine existing curricula to strategise by empowering student educators with soft skills to succeed in an AI-dominated workplace. </P>

<P>This study made significant contributions to advance an understanding of Gen-AI-based LLM research. This research indicates how Gen-AI can stop cheating in academia. It is suggested that awareness needs to be created using discussion forums and online panels as critical conversations about detector software like GPTZero. This study extends the theoretical knowledge of Gen-AI, in particular, an awareness of the ‘truth or fake’ of Gen-AI. The author recommended modifications to the education of educators, covering ethics, tuition, research, assessment, and professional development. Futuristic inquiry is to be conducted to rethink curricula to enhance the soft skills of student educators in preparing for an AI-dominated workplace. </P>
</Sect>

<Sect>
<H4>References </H4>

<P>Abawi, LA. 2015. Inclusion ‘from the gate in:’ Wrapping students with personalised learning support. International Journal of Pedagogies and Learning
<Link> 10(1):47-61. https://doi.org/10.1080/22040552.201 </Link>

<Link>5.1084676 </Link>
</P>

<P>Ali, F. 2023. Let the devil speak for itself: Should ChatGPT be allowed or banned in hospitality and tourism schools? Journal of Global Hospitality and Tourism
<Link> 2(1):1-6. https://doi.org/10.5038/2771</Link>

<Link>5957.2.1.1016 </Link>
</P>

<P>Antonenko, P. &amp; Abramowitz, B. 2023. In-service teachers’ (mis) conceptions of artificial intelligence in K-12 science education. Journal of Research on Technology in Education 55(1):64-78. 
<Link>https://doi.org/10.1080/15391523.2022.2119450 </Link>
</P>

<P>Baidoo-Anu, D. &amp; Ansah, LO. 2023. Education in the era of generative artificial intelligence (AI): Understanding the potential benefits of ChatGPT in promoting teaching and learning. Journal of AI 
<Link>7(1):52-62. http://dx.doi.org/10.2139/ssrn.4337484 </Link>
</P>

<P>Baek, TH. &amp; Kim, M. 2023. Is ChatGPT scary good? How user motivations affect creepiness and trust in generative artificial intelligence. Telematics and Informatics
<Link> 83. 102030. https://doi.org/10.1016/j. </Link>

<Link>tele.2023.102030 </Link>
</P>

<P>Canzonetta, J. 2021. Repurposing plagiarism detection services for responsible pedagogical application and (in)formative assessment of source attribution practices. Assessing Writing 50. 
<Link>100563. https://doi.org/10.1016/j.asw.2021.100563 </Link>
</P>

<P>Castillo, AGR., Silva, GJS., Arocutipa, JPF., Berrios, HQ., Rodriguez, MAM., Reyes, GY., Lopez, HRP., Herbert, RMVT., Rivera, VH., &amp; Arias-Gonzáles, JL. 2023. Effect of Chat GPT on the digitized learning process of university students. Journal of Namibian Studies: History Politics Culture
<Link> 33:1-15. https://doi.org/10.59670/jns.v33i.411 </Link>
</P>

<P>Chaudhry, MA. &amp; Kazim, E. 2021. Artificial intelligence in education (AIEd): A high-level academic and industry note. AI and Ethics 
<Link>2:1-16. https://doi.org/10.1007/s43681-021-00074-z </Link>
</P>

<P>Chan, CKY. &amp; Hu, W. 2023. Students’ voices on generative AI: Perceptions, benefits, and challenges in higher education. International Journal of Educational Technology in Higher Education 20. 43. 18 pages. 
<Link>https://doi.org/10.1186/s41239-023-00411-8 </Link>
</P>

<P>Cooper, G. 2023. Examining science education in chatbot: An exploratory study of generative artificial intelligence. Journal of Science Education and Technology
<Link> 32(3):444-452. https://doi.org/10.1007/ </Link>

<Link>s10956-023-10039-y </Link>
</P>

<P>Cotton, DRE., Cotton, PA., &amp; Shipway, JR. 2023. Chatting and cheating: Ensuring academic integrity in the era of ChatGPT. Innovations in Education and Teaching International
<Link> 61(2):1-12. https://doi.org/10 </Link>

<Link>.1080/14703297.2023.2190148 </Link>
</P>

<P>Cope, B., Kalantzis, M., &amp; Searsmith, D. 2021. Artificial intelligence for education: Knowledge and its assessment in AI-enabled learning ecologies. Educational Philosophy and Theory 53(12):1229-1245. 
<Link>https://doi.org/10.1080/00131857.2020.1728732 </Link>
</P>

<P>Creswell, JW. 2013. Steps in conducting a scholarly mixed methods study. Discipline-Based Education Research Group. Paper 48. PowerPoint: 52 slides. </P>

<P>Dwivedi, YK., Kshetri, N., Hughes, L., Slade, EL., Jeyaraj, A., Kar, AK., Baabdullah, AM., Koohang, A., Raghavan, V., Ahuja, M., &amp; Albanna, H. 2023. ‘So what if ChatGPT wrote it?’ Multidisciplinary perspectives on opportunities, challenges and implications of generative conversational AI for research, practice and policy. International Journal of Information Management
<Link> 71. 102642. 63 pages. https://doi.org/10.1016/j. </Link>

<Link>ijinfomgt.2023.102642 </Link>
</P>

<P>Eke, DO. 2023. ChatGPT and the rise of generative AI: A threat to academic integrity? Journal of Responsible Technology 13. 100060. 4 pages. 
<Link>https://doi.org/10.1016/j.jrt.2023.100060 </Link>
</P>

<P>Firaina, R. &amp; Sulisworo, D. 2023. Exploring the usage of ChatGPT in higher education: Frequency and impact on productivity. Buletin Edukasi Indonesia 2
<Link>(01):39-46. https://doi.org/10.56741/bei.v2i01.310 </Link>
</P>

<P>Griffith, E. &amp; Metz, C. 2023. ‘Let 1,000 flowers bloom:’ AI funding frenzy escalates. The New York Times
<Link>. Available at: https:// </Link>

<Link>www.nytimes.com/2023/03/14/technology/ai-funding-boom. </Link>

<Link>html?action=click&amp;module=RelatedLinks&amp;pgtype=Article</Link>
. (Accessed on 11 January 2024). </P>

<P>Holmes, W., Bialik, M., &amp; Fadel, C. 2023. Artificial intelligence in education. In Stückelberger, C. &amp; Duggal, P. (Eds.): Data ethics: building trust: how digital technologies can serve humanity, 621-653. Geneva: 
<Link>Globethics. https://doi.org/10.58863/20.500.12424/4276068 </Link>
</P>

<P>Huang, W., Hew, KF., &amp; Fryer, LK. 2022. Chatbots for language learning – are they really useful? A systematic review of chatbot-supported language learning. Journal of Computer Assisted Learning 
<Link>38(1):237-257. https://doi.org/10.1111/jcal.12610 </Link>
</P>

<P>Huallpa, JJ. 2023. Exploring the ethical considerations of using Chat GPT in university education. Periodicals of Engineering and Natural Sciences
<Link> 11(4):105-115. https://doi.org/10.21533/pen.v11.i4.200 </Link>
</P>

<P>Hycner, RH. 1999. Some guidelines for the phenomenological analysis of interview data. In Bryman, A. &amp; Burgess, RG. (Eds.): Qualitative research 3, 143-164. London: Sage. </P>

<P>Kaplan-Rakowski, R., Grotewold, K., Hartwick, P., &amp; Papin, K. 2023. Generative AI and teachers’ perspectives on its implementation in education. Journal of Interactive Learning Research 34(2/3):1333. </P>

<P>Kelly, A., Sullivan, M., &amp; Strampel, K. 2023. Generative artificial intelligence: University student awareness, experience, and confidence in use across disciplines. Journal of University Teaching &amp; Learning Practice
<Link> 20(6). 12. 16 pages. https://doi. </Link>

<Link>org/10.53761/1.20.6.12 </Link>
</P>

<P>Koopman, O. &amp; Koopman, KJ. 2018. The body as blind spot: Towards lived experience and a body-specific philosophy in education. Education as Change
<Link> 22(3):1-16. https://doi.org/10.25159/1947</Link>

<Link>9417/1880 </Link>
</P>

<P>Leaver, T. &amp; Srdarov, S. 2023. ChatGPT isn’t magic: The hype and hypocrisy of generative artificial intelligence (AI) rhetoric. M/C (Media &amp; Culture) Journal
<Link> 26(5). 11 pages. https://doi.org/10.5204/ </Link>

<Link>mcj.3004 </Link>
</P>

<P>Ledwith, A. &amp; Risquez, A. 2008. Using anti-plagiarism software to promote academic honesty in the context of peer reviewed assignments. Studies in Higher Education
<Link> 33(4):371-384. https:// </Link>

<Link>doi.org/10.1080/03075070802211562 </Link>
</P>

<P>Li, KC. &amp; Wong, BTM. 2023. Artificial intelligence in personalised learning: A bibliometric analysis. Interactive Technology and Smart Education
<Link> 20(3):422-445. https://doi.org/10.1108/ITSE-01-2023</Link>

<Link>0007 </Link>
</P>

<P>Lim, WM., Gunasekara, A., Pallant, JL., Pallant, JI., &amp; Pechenkina, E. 2023. Generative AI and the future of education: Ragnarök or reformation? A paradoxical perspective from management educators. The International Journal of Management Education
<Link> 21(2). 100790. 13 pages. https://doi.org/10.1016/j. </Link>

<Link>ijme.2023.100790 </Link>
</P>

<P>Liu, Z., Yao, Z., Li, F., &amp; Luo, B. 2023. Check me if you can: Detecting ChatGPT-generated academic writing using CheckGPT. 
<Link>arXiv:2306.05524. 23 pages. https://doi.org/10.48550/ </Link>

<Link>arXiv.2306.05524 </Link>
</P>

<P>Lodge, JM., Thompson, K., &amp; Corrin, L. 2023. Mapping out a research agenda for generative artificial intelligence in tertiary education. Australasian Journal of Educational Technology 39(1):1-8. </P>

<P>Luan, H., Geczy, P., Lai, H., Gobert, J., Yang, SJH., Ogata, H., Baltes, J., Guerra, R., Li, P., &amp; Tsai, C-C. 2020. Challenges and future directions of big data and artificial intelligence in education. Frontiers in Psychology
<Link> 11. 580820. 11 pages. https://doi. </Link>

<Link>org/10.3389/fpsyg.2020.580820 </Link>
</P>

<P>Mhlanga, D. 2023. Open AI in education, the responsible and ethical use of ChatGPT towards lifelong learning. In Mhlanga, D. (Ed.): FinTech and Artificial Intelligence for Sustainable Development: The role of smart technologies in achieving development goals, 387-409. 
<Link>Cham: Palgrave Macmillan. https://doi.org/10.1007/978-3-031</Link>

<Link>37776-1_17 </Link>
</P>

<P>Megahed, FM., Chen, YJ., Ferris, JA., Knoth, S., &amp; Jones-Farmer, LA. 2023. How generative AI models such as ChatGPT can be (mis)used in SPC practice, education, and research? An exploratory study. Quality Engineering
<Link>. arXiv:2302.10916v1. 30 pages. https://arxiv. </Link>

<Link>org/pdf/2302.10916.pdf</Link>
. </P>

<P>Mohamed, AM. 2024. Exploring the potential of an AI-based chatbot (ChatGPT) in enhancing English as a foreign language (EFL) teaching: Perceptions of EFL faculty members. Education and Information Technologies
<Link> 29: 3195-3217. https://doi.org/10.1007/ </Link>

<Link>s10639-023-11917-z </Link>
</P>

<P>Mukherjee, DV. 2023. At the edge of tomorrow: Unleashing human potential in the AI era. Chennai: Notion Press. </P>

<P>Nemorin, S., Vlachidis, A., Ayerakwa, HM., &amp; Andriotis, P. 2023. AI hyped? A horizon scan of discourse on artificial intelligence in education (AIED) and development. Learning, Media and Technology 
<Link>48(1):38-51. https://doi.org/10.1080/17439884.2022.2095568 </Link>
</P>

<P>Nowell, LS., Norris, JM., White, DE., &amp; Moules, NJ. 2017. Thematic analysis: Striving to meet the trustworthiness criteria. International Journal of Qualitative Methods
<Link> 16(1):1-13. https://doi. </Link>

<Link>org/10.1177/1609406917733847 </Link>
</P>

<P>Oravec, JA. 2023. Artificial intelligence implications for academic </P>

<P>cheating: Expanding the dimensions of responsible human-AI collaboration with ChatGPT. Journal of Interactive Learning Research 34(2):213-237. </P>

<P>Peres, R., Schreier, M., Schweidel, D., &amp; Sorescu, A. 2023. On ChatGPT and beyond: How generative artificial intelligence may affect research, teaching, and practice. International Journal of Research in Marketing
<Link> 40(2):269-275. https://doi.org/10.1016/j. </Link>

<Link>ijresmar.2023.03.001 </Link>
</P>

<P>Perkins, M. 2023. Academic integrity considerations of AI large language models in the post-pandemic era: ChatGPT and beyond. Journal of University Teaching &amp; Learning Practice
<Link> 20(2):1-23. https://doi. </Link>

<Link>org/10.53761/1.20.02.07 </Link>
</P>

<P>Popenici, SA. &amp; Kerr, S. 2017. Exploring the impact of artificial intelligence on teaching and learning in higher education. Research and Practice in Technology Enhanced Learning
<Link> 12(1):1-13. https://doi. </Link>

<Link>org/10.1186/s41039-017-0062-8 </Link>
</P>

<P>Reilly, P. 2018. The impact of artificial intelligence on the HR function. IES Perspectives
<Link> Member Paper 142, 41-58. Available at: https:// </Link>

<Link>www.employmentstudies.co.uk/system/files/resources/files/</Link>
. (Accessed on 25 April 2024). </P>

<P>Rudolph, J., Tan, S., &amp; Tan, S. 2023. War of the chatbots: Bard, Bing Chat, ChatGPT, Ernie and beyond. The new AI gold rush and its impact on higher education. Journal of Applied Learning and Teaching 
<Link>6(1):364-389. https://doi.org/10.37074/jalt.2023.6.1.23 </Link>
</P>

<P>Sallam, M. 2023. ChatGPT utility in healthcare education, research, and practice: Systematic review on the promising perspectives and valid concerns. Healthcare
<Link> 11(6). 887. 20 pages. https://doi. </Link>

<Link>org/10.3390/healthcare11060887 </Link>
</P>

<P>Shang, H-F. 2019. An investigation of plagiarism software use and awareness training on English as a foreign language (EFL) students. Journal of Computing in Higher Education 31(1):105-120. 
<Link>https://doi.org/10.1007/s12528-018-9193-1 </Link>
</P>

<P>Smith, JA. &amp; Osborn, M. 2008. Interpretative phenomenological analysis. In Smith, J. (Ed.): Qualitative psychology: A practical guide to research methods, 53-80. London: Sage. </P>

<P>Smolansky, A., Cram, A., Raduescu, C., Zeivots, S., Huber, E., &amp; Kizilcec, RF. 2023. Educator and student perspectives on the impact of generative AI on assessments in higher education. Proceedings of the Tenth ACM Conference on Learning @ Scale, 378-382. Available 
<Link>at: https://www.researchgate.net/publication/372482047_ </Link>

<Link>Educator_and_Student_Perspectives_on_the_Impact_of_ </Link>

<Link>Generative_AI_on_Assessments_in_Higher_Education</Link>
. (Accessed on 25 October 2023). </P>

<P>Taecharungroj, V. 2023. ‘What can ChatGPT do?’ Analyzing early reactions to the innovative AI chatbot on Twitter. Big Data and Cognitive Computing
<Link> 7(1). 35. 10 pages. https://doi.org/10.3390/ </Link>

<Link>bdcc7010035 </Link>
</P>

<P>Toli, L. 2023. The impact of AI technology on higher education in India: Revolutionizing learning and empowering students. International Journal of Multidisciplinary Educational Research 12(10):76-80. 
<Link>http://ijmer.in.doi./2023/12.10.92 </Link>
</P>

<P>Van Wyk, MM., Adarkwah, MA., &amp; Amponsah, S. 2023. Why all the hype about ChatGPT? Academics’ views of a chat-based conversational learning strategy at an open distance e-learning institution. Open Praxis
<Link> 15(3):214-225. https://doi.org/10.55982/ </Link>

<Link>openpraxis.15.3.563 </Link>
</P>

<P>Van Wyk, MM. 2022. Economics student teachers’ experiences-as-lived in a flipped pedagogical approach during crisis times. African Perspectives of Research in Teaching and Learning 6(3):121-139. </P>

<P>Yang, W. 2022. Artificial intelligence education for young children: Why, what, and how in curriculum design and implementation. Computers and Education: Artificial Intelligence 3. 100061. 7 pages. 
<Link>https://doi.org/10.1016/j.caeai.2022.100061 </Link>
</P>

<P>Zawacki-Richter, O., Marín, VI., Bond, M., &amp; Gouverneur, F. 2019. A systematic review of research on artificial intelligence applications in higher education – where are the educators? International Journal of Educational Technology in Higher Education 
<Link>16(1):1-27. https://doi.org/10.1186/s41239-019-0171-0 </Link>
</P>
<Figure>

<ImageData src="images/Artificial Intelligence Transforming Higher Education Vol 2_img_72.jpg"/>
</Figure>
</Sect>
</Sect>
</Sect>
</Part>
</TaggedPDF-doc>
