// Gravity Forms global bootstrap (minified, vendor-generated — code left
// byte-identical; comments only). Defines the global `gform` object exactly
// once (guarded by `gform ||`) providing:
//  * readiness flags (domLoaded / scriptsLoaded / themeScriptsLoaded) flipped
//    by the "gform_main_scripts_loaded", "gform/theme/scripts_loaded" and
//    DOMContentLoaded events;
//  * initializeOnLoaded(cb): runs cb once all required flags are set (theme
//    scripts are not required inside the form editor, detected via the
//    presence of a global InitializeEditor function); if not ready yet, it
//    re-attaches the three event listeners and retries on each event. In the
//    form editor context it logs a deprecation warning before running cb;
//  * a WordPress-style hook registry: addAction/addFilter register callbacks
//    in gform.hooks[type][name] as {tag, callable, priority} entries
//    (priority defaults to 10, tag defaults to "<name>_<index>");
//    doAction/applyFilters sort entries by ascending priority and invoke
//    them — string callables are resolved as properties of `window`, and
//    filters thread the first argument through each callback and return it;
//    removeAction/removeFilter drop entries matching the given tag and/or
//    priority (both omitted removes all entries for that hook name).
var gform;gform||(document.addEventListener("gform_main_scripts_loaded",function(){gform.scriptsLoaded=!0}),document.addEventListener("gform/theme/scripts_loaded",function(){gform.themeScriptsLoaded=!0}),window.addEventListener("DOMContentLoaded",function(){gform.domLoaded=!0}),gform={domLoaded:!1,scriptsLoaded:!1,themeScriptsLoaded:!1,isFormEditor:()=>"function"==typeof InitializeEditor,callIfLoaded:function(o){return!(!gform.domLoaded||!gform.scriptsLoaded||!gform.themeScriptsLoaded&&!gform.isFormEditor()||(gform.isFormEditor()&&console.warn("The use of gform.initializeOnLoaded() is deprecated in the form editor context and will be removed in Gravity Forms 3.1."),o(),0))},initializeOnLoaded:function(o){gform.callIfLoaded(o)||(document.addEventListener("gform_main_scripts_loaded",()=>{gform.scriptsLoaded=!0,gform.callIfLoaded(o)}),document.addEventListener("gform/theme/scripts_loaded",()=>{gform.themeScriptsLoaded=!0,gform.callIfLoaded(o)}),window.addEventListener("DOMContentLoaded",()=>{gform.domLoaded=!0,gform.callIfLoaded(o)}))},hooks:{action:{},filter:{}},addAction:function(o,r,e,t){gform.addHook("action",o,r,e,t)},addFilter:function(o,r,e,t){gform.addHook("filter",o,r,e,t)},doAction:function(o){gform.doHook("action",o,arguments)},applyFilters:function(o){return gform.doHook("filter",o,arguments)},removeAction:function(o,r){gform.removeHook("action",o,r)},removeFilter:function(o,r,e){gform.removeHook("filter",o,r,e)},addHook:function(o,r,e,t,n){null==gform.hooks[o][r]&&(gform.hooks[o][r]=[]);var d=gform.hooks[o][r];null==n&&(n=r+"_"+d.length),gform.hooks[o][r].push({tag:n,callable:e,priority:t=null==t?10:t})},doHook:function(r,o,e){var t;if(e=Array.prototype.slice.call(e,1),null!=gform.hooks[r][o]&&((o=gform.hooks[r][o]).sort(function(o,r){return o.priority-r.priority}),o.forEach(function(o){"function"!=typeof(t=o.callable)&&(t=window[t]),"action"==r?t.apply(null,e):e[0]=t.apply(null,e)})),"filter"==r)return e[0]},removeHook:function(o,r,t,n){var 
e;null!=gform.hooks[o][r]&&(e=(e=gform.hooks[o][r]).filter(function(o,r,e){return!!(null!=n&&n!=o.tag||null!=t&&t!=o.priority)}),gform.hooks[o][r]=e)}});
// Breeze cache plugin: link-prefetch configuration for this site.
var breeze_prefetch = {
  // Origin whose links are eligible for prefetching.
  "local_url": "https:\/\/ps-engage.com",
  // "1" = skip prefetching of cross-origin links.
  "ignore_remote_prefetch": "1",
  // URL fragments that must never be prefetched.
  "ignore_list": ["wp-admin", "wp-login.php"]
};
The EU AI Liability Directive was originally intended to modernise current liability frameworks to address the unique challenges posed by AI systems. Notably, one of the stated goals of the EU AI Liability Directive was to lower the threshold of proving negligence, while also providing much needed clarity for developers and deployers of AI technologies in the EU. However, at the AI Action Summit in Paris (10-11 Feb 2025), the European Commission withdrew the AI Liability Directive from the list of legislative acts it was considering for 2025.
Though the EU AI Liability Directive is currently in limbo, the question remains: how should liability and AI systems be handled? Traditional liability frameworks, such as product and tort liability, are not able to account for harms arising from AI systems. This means that current frameworks need to be adapted to ensure that when an AI system causes harm, claimants must not bear an undue burden of proving fault.
Diversity brings fragmentation
This question presents a unique challenge for Asia. Some countries – like China and Korea – have binding rules (China’s Interim Measures for the Management of Generative AI Services, and Korea’s AI Basic Act) that specify accountability standards for AI-related harm. For example, Korea’s AI Basic Act demands clear content labelling of generative AI output, and spells out the obligations of developers and the enforcement mechanisms.
Conversely, countries such as Singapore, Japan, and Australia favour a soft, voluntary framework. For instance, Singapore’s Model AI Governance Framework offers best practices and ethical guidelines without immediate legal enforceability. This diversity reflects the region’s balancing act between maintaining oversight of AI and fostering innovation to support rapid economic growth. However, the diverse regulatory approaches to AI have a direct impact on concepts and bearers of liability, potentially leading to fragmentation and cross-border challenges.
Attribution and transparency challenges
Another major problem is the difficulty of attributing harm to an AI system. At this point, AI systems are “black boxes”: it is next to impossible to point to a part of an AI system and definitively identify it as the source of harm. In certain types of AI, such as image and video generation tools, it would be relatively simple to identify when harmful content is generated. However, in automated decision-making systems, it would be far more complicated to identify the exact source of harm. Current liability principles – strict and fault-based liability in particular – must thus be updated to assign responsibility.
Some jurisdictions like Japan and Singapore have issued non-binding guidelines that encourage companies to institute internal governance protocols and conduct regular risk assessment exercises to manage these challenges. China has taken a more direct approach by requiring AI developers to explicitly label and insert metadata into generated content, making it easier to trace content origins and assign responsibility. Nonetheless, the technical challenges pose obstacles to how we can conclusively assign responsibility to AI.
Balancing innovation with accountability
There is a clear regional tension: on one hand, there is a desire to protect public safety and uphold ethical standards – evident in China’s comprehensive AI laws and Vietnam’s draft Digital Technology Industry Law – and on the other, a need to preserve the region’s competitive edge in innovation. Policymakers are cautious not to impose overly burdensome requirements that could stifle tech development, especially in markets like India and Singapore where AI is seen as a critical driver for economic growth. Regional efforts such as the ASEAN Guide on AI Governance aim to harmonize these disparate approaches, seeking a common baseline for accountability without sacrificing the flexibility needed to nurture rapid technological advancement.
Moving forward
AI technologies inherently transcend national borders. Without a common set of standards, companies operating across different Asian markets are blanketed by a patchwork of regulations – each with its own definitions, risk classifications, and enforcement mechanisms.
Harmonization minimizes regulatory arbitrage, reduces compliance costs, and ensures that AI systems are developed under consistent safety and ethical criteria. For instance, initiatives like the ASEAN Guide on AI Governance are paving the way for a regional baseline that could eventually align with international standards such as the EU AI Act and the GDPR.
Furthermore, uniform standards allow companies to innovate confidently, knowing that a product meeting one set of rules can be more easily adapted for neighbouring markets. This consistency is particularly important in Asia because it helps create an integrated market for AI technologies.
Most importantly, harmonization brings consistency. Consumers and stakeholders will ultimately benefit from clearer expectations and legal thresholds about AI safety, transparency, and accountability. This standardized approach can help prevent scenarios where inconsistent regulations lead to public distrust of AI. Cross‑border cooperation on liability in AI systems also enables regional authorities to share best practices, conduct joint audits, and address emerging risks collectively, thus strengthening the overall governance framework for AI in Asia.
New Delhi became the centre of the global artificial intelligence (AI) conversation from 16–20 February as it hosted the AI Impact Summit, drawing policymakers, industry leaders, investors, and researchers from across the world. Building on the momentum created by the inaugural summit at Bletchley Park in the United Kingdom, the Delhi edition positioned itself differently. […]
From Trade Shock to Strategic Negotiation The Agreement on Reciprocal Trade signed in February 2026 between Indonesia and the United States was not initially driven by Indonesia’s intention to liberalize trade, but rather by external pressure. On April 2, 2025, the United States imposed unilateral tariffs of up to 32% on Indonesian goods, citing its […]
With the rapidly evolving AI world, the Southeast Asian region is pushing forward to lead the way. AI has been mainly viewed over the last few years as a technology trend, a productivity tool, or a startup opportunity. In 2026, the whole landscape is turning upside down with AI being considered as a national infrastructure. […]
// Localise the jQuery UI datepicker once the DOM is ready: English month and
// day names, Monday as the first day of the week, "d MM, yy" display format,
// left-to-right layout.
jQuery(function ($) {
  var datepickerDefaults = {
    "closeText": "Close",
    "currentText": "Today",
    "monthNames": ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"],
    "monthNamesShort": ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"],
    "nextText": "Next",
    "prevText": "Previous",
    "dayNames": ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"],
    "dayNamesShort": ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"],
    "dayNamesMin": ["S", "M", "T", "W", "T", "F", "S"],
    "dateFormat": "d MM, yy",
    "firstDay": 1,
    "isRTL": false
  };
  $.datepicker.setDefaults(datepickerDefaults);
});
// Gravity Forms datepicker internationalisation strings (English locale,
// Monday-first week).
var gform_i18n = {
  "datepicker": {
    "days": {
      "monday": "Mo",
      "tuesday": "Tu",
      "wednesday": "We",
      "thursday": "Th",
      "friday": "Fr",
      "saturday": "Sa",
      "sunday": "Su"
    },
    "months": {
      "january": "January",
      "february": "February",
      "march": "March",
      "april": "April",
      "may": "May",
      "june": "June",
      "july": "July",
      "august": "August",
      "september": "September",
      "october": "October",
      "november": "November",
      "december": "December"
    },
    "firstDay": 1,
    // Accessible label for the calendar icon.
    "iconText": "Select date"
  }
};
// Gravity Forms legacy multi-file upload configuration — empty on this page.
// NOTE(review): presumably populated per form by Gravity Forms when legacy
// multi-file upload fields are present; confirm against the plugin output.
var gf_legacy_multi = [];
// Gravity Forms file-upload widget: user-facing strings and asset locations.
var gform_gravityforms = {
  "strings": {
    "invalid_file_extension": "This type of file is not allowed. Must be one of the following:",
    "delete_file": "Delete this file",
    "in_progress": "in progress",
    "file_exceeds_limit": "File exceeds size limit",
    "illegal_extension": "This type of file is not allowed.",
    "max_reached": "Maximum number of files reached",
    "unknown_error": "There was a problem while saving the file on the server",
    "currently_uploading": "Please wait for the uploading to complete",
    "cancel": "Cancel",
    "cancel_upload": "Cancel this upload",
    "cancelled": "Cancelled",
    "error": "Error",
    "message": "Message"
  },
  "vars": {
    // Base URL for the plugin's bundled images.
    "images_url": "https:\/\/ps-engage.com\/wp-content\/plugins\/gravityforms\/images"
  }
};
// Gravity Forms global front-end configuration: currency formatting, plugin
// URLs, and save-and-continue strings.
var gf_global = {
  "gf_currency_config": {
    "name": "U.S. Dollar",
    "symbol_left": "$",
    "symbol_right": "",
    "symbol_padding": "",
    "thousand_separator": ",",
    "decimal_separator": ".",
    "decimals": 2,
    "code": "USD"
  },
  "base_url": "https:\/\/ps-engage.com\/wp-content\/plugins\/gravityforms",
  // Per-field number format overrides (none registered on this page).
  "number_formats": [],
  "spinnerUrl": "https:\/\/ps-engage.com\/wp-content\/plugins\/gravityforms\/images\/spinner.svg",
  "version_hash": "047425350e49960f1a3faecec58999c4",
  "strings": {
    "newRowAdded": "New row added.",
    "rowRemoved": "Row removed",
    "formSaved": "The form has been saved. The content contains the link to return and complete the form."
  }
};
// Gravity Forms theme-framework configuration: honeypot versioning, AJAX
// submission endpoint/nonces, and shared i18n strings.
var gform_theme_config = {
  "common": {
    "form": {
      "honeypot": {
        "version_hash": "047425350e49960f1a3faecec58999c4"
      },
      "ajax": {
        "ajaxurl": "https:\/\/ps-engage.com\/wp-admin\/admin-ajax.php",
        "ajax_submission_nonce": "0ea1c3fbb8",
        "i18n": {
          // sprintf-style placeholders: %1$s current step, %2$s total, %3$s title.
          "step_announcement": "Step %1$s of %2$s, %3$s",
          "unknown_error": "There was an unknown error processing your request. Please try again."
        }
      }
    }
  },
  // Empty when hot-module-reload development mode is disabled.
  "hmr_dev": "",
  "public_path": "https:\/\/ps-engage.com\/wp-content\/plugins\/gravityforms\/assets\/js\/dist\/",
  "config_nonce": "4eaa449aad"
};