{"id":43088,"date":"2026-02-25T10:42:54","date_gmt":"2026-02-25T10:42:54","guid":{"rendered":"https:\/\/imperix.com\/doc\/?p=43088"},"modified":"2026-02-25T10:42:55","modified_gmt":"2026-02-25T10:42:55","slug":"ann-based-control-of-a-three-phase-inverter","status":"publish","type":"post","link":"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter","title":{"rendered":"ANN-based control of a three-phase inverter"},"content":{"rendered":"<div id=\"ez-toc-container\" class=\"ez-toc-v2_0_82_2 ez-toc-wrap-right-text counter-hierarchy ez-toc-counter ez-toc-grey ez-toc-container-direction\">\n<div class=\"ez-toc-title-container\">\n<p class=\"ez-toc-title\" style=\"cursor:inherit\">Table of Contents<\/p>\n<span class=\"ez-toc-title-toggle\"><\/span><\/div>\n<nav><ul class='ez-toc-list ez-toc-list-level-1 ' ><li class='ez-toc-page-1 ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-1\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Classification-of-ANNs\" >Classification of ANNs<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-2\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#MLP-FNN-architecture\" >MLP-FNN architecture<\/a><ul class='ez-toc-list-level-3' ><li class='ez-toc-heading-level-3'><a class=\"ez-toc-link ez-toc-heading-3\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Mathematical-representation-of-FNN\" >Mathematical representation of FNN<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-3'><a class=\"ez-toc-link ez-toc-heading-4\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Activation-functions\" >Activation functions<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-3'><a class=\"ez-toc-link ez-toc-heading-5\" 
href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Network-topology-and-design-flexibility\" >Network topology and design flexibility<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-3'><a class=\"ez-toc-link ez-toc-heading-6\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Training-algorithms\" >Training algorithms<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-3'><a class=\"ez-toc-link ez-toc-heading-7\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Data-partitioning-and-validation\" >Data partitioning and validation<\/a><\/li><\/ul><\/li><li class='ez-toc-page-1 ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-8\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Challenges-of-ANN-based-control\" >Challenges of ANN-based control<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-9\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Experimental-implementation-of-an-ANN-based-control\" >Experimental implementation of an ANN-based control<\/a><ul class='ez-toc-list-level-3' ><li class='ez-toc-heading-level-3'><a class=\"ez-toc-link ez-toc-heading-10\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Structure-of-FNN\" >Structure of FNN<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-3'><a class=\"ez-toc-link ez-toc-heading-11\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Data-collection-and-preprocessing\" >Data collection and preprocessing<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-3'><a class=\"ez-toc-link ez-toc-heading-12\" 
href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Training-and-deployment\" >Training and deployment<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-3'><a class=\"ez-toc-link ez-toc-heading-13\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Tips-to-improve-the-ANN-performance\" >Tips to improve the ANN performance<\/a><\/li><\/ul><\/li><li class='ez-toc-page-1 ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-14\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Implementation-of-FNN-based-controller\" >Implementation of FNN-based controller<\/a><ul class='ez-toc-list-level-3' ><li class='ez-toc-heading-level-3'><a class=\"ez-toc-link ez-toc-heading-15\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Experimental-setup\" >Experimental setup<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-3'><a class=\"ez-toc-link ez-toc-heading-16\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Downloads\" >Downloads<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-3'><a class=\"ez-toc-link ez-toc-heading-17\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Experimental-results\" >Experimental results<\/a><\/li><\/ul><\/li><li class='ez-toc-page-1 ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-18\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#Conclusion\" >Conclusion<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-19\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#References\" >References<\/a><\/li><li class='ez-toc-page-1 ez-toc-heading-level-2'><a class=\"ez-toc-link ez-toc-heading-20\" 
href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\/#To-go-further-from-here%E2%80%A6\" >To go further from here&#8230;<\/a><\/li><\/ul><\/nav><\/div>\n\n<p>Conventional model-based control in power electronics relies heavily on deriving precise mathematical models of the physical system. In contrast, data-driven control shifts this paradigm by estimating control laws directly from empirical data. A key example of this approach is Artificial Neural Network (ANN)-based control. <a href=\"https:\/\/en.wikipedia.org\/wiki\/Neural_network_(machine_learning)\" type=\"link\" id=\"https:\/\/en.wikipedia.org\/wiki\/Neural_network_(machine_learning)\">ANNs<\/a> serve as universal function approximators that extract complex input\u2013output relationships directly from empirical or simulated datasets. Beyond mapping known parameters, ANNs are good at generalization, allowing them to accurately predict system behaviour across unseen operating conditions. This robustness is particularly advantageous for managing systems where the underlying dynamics are complex or defy explicit mathematical modelling. Additionally, their inherent structural flexibility makes them naturally suited for multi-variable systems [1]. While the foundational mathematics of ANNs were established in 1943, their application in power electronics has surged recently, becoming a dominant area of research [2].<\/p>\n\n\n\n<p>This technical note aims to replace the traditional inner-loop discrete PI current regulator with a lightweight feedforward neural network (FNN), a form of supervised machine learning. 
The FNN is designed to accurately reproduce the voltage-reference mapping within a standard <a href=\"https:\/\/imperix.com\/doc\/implementation\/grid-following-inverter?currentThread=static-synchronous-compensator-statcom\" type=\"link\" id=\"https:\/\/imperix.com\/doc\/implementation\/grid-following-inverter?currentThread=static-synchronous-compensator-statcom\">grid-following current control<\/a> structure. Finally, this approach is validated by implementing the ANN-based controller on a physical <a href=\"https:\/\/imperix.com\/products\/power\/programmable-inverter\/\" type=\"link\" id=\"https:\/\/imperix.com\/products\/power\/programmable-inverter\/\">TPI8032<\/a> programmable inverter.<\/p>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_3e0dd6-1c, .wp-block-kadence-advancedheading.kt-adv-heading43088_3e0dd6-1c[data-kb-block=\"kb-adv-heading43088_3e0dd6-1c\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_3e0dd6-1c mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_3e0dd6-1c[data-kb-block=\"kb-adv-heading43088_3e0dd6-1c\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_3e0dd6-1c img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_3e0dd6-1c[data-kb-block=\"kb-adv-heading43088_3e0dd6-1c\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h2 class=\"kt-adv-heading43088_3e0dd6-1c wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_3e0dd6-1c\"><span class=\"ez-toc-section\" id=\"Classification-of-ANNs\"><\/span>Classification of ANNs<span class=\"ez-toc-section-end\"><\/span><\/h2>\n\n\n\n<p>Depending on the learning task, ANNs are generally categorized into two main groups: classification and regression. 
Classification is used for discrete decisions, while regression handles continuous predictions. In power electronics, this distinction aligns perfectly with how the ANN drives the converter:<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><strong>Direct control (Classification):<\/strong> The ANN directly generates the discrete gate signals (e.g., ON\/OFF) for the semiconductor switches. As the outputs are discrete states, this is formulated as a classification problem.<\/li>\n\n\n\n<li><strong>Indirect control (Regression):<\/strong> The ANN calculates a continuous control variable, such as the voltage reference or duty cycle, which is then fed into a modulator. As the output is a continuous numerical value, this requires a regression model.<\/li>\n<\/ul>\n\n\n\n<p>In this technical note, the ANN replaces a standard discrete PI current controller, meaning it needs to map a continuous voltage reference. Therefore, an indirect control strategy based on a regression ANN is implemented.<\/p>\n\n\n\n<p>Beyond their learning task, ANNs can also be classified by their internal topology into feedforward and recurrent architectures. Feedforward neural networks (FNNs) process data strictly in one direction (see Fig. 1). As static, memoryless systems, FNNs lack an internal state and require past samples to be explicitly included in the input vector to account for historical data. Standard feedforward models include Multilayer Perceptrons, Radial Basis Function networks, and Convolutional Neural Networks. In contrast, recurrent networks, including LSTMs and GRUs, incorporate feedback loops that retain an internal memory of past inputs (see Fig. 2). This makes them highly effective for modeling dynamic, time-dependent systems [3].<\/p>\n\n\n\n<p>From all of these available ANN architectures, the Multilayer Perceptron (MLP) FNN is frequently adopted as a baseline due to its simplicity and implementation efficiency and is widely used in industrial applications [1]. 
Therefore, for this technical note, the MLP-FNN is selected for its strictly feedforward architecture, which ensures deterministic, low-latency execution required for a high-bandwidth controller. Additionally, its capability as a universal function approximator allows it to accurately reproduce the PI controller\u2019s input-output mapping with minimal computational overhead.<\/p>\n\n\n\n<div class=\"wp-block-columns is-layout-flex wp-container-core-columns-is-layout-9d6595d7 wp-block-columns-is-layout-flex\">\n<div class=\"wp-block-column is-layout-flow wp-block-column-is-layout-flow\"><div class=\"wp-block-image\">\n<figure class=\"aligncenter size-large\" id=\"fig_FNN\"><img loading=\"lazy\" decoding=\"async\" width=\"1024\" height=\"556\" src=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/FNN-2-1024x556.png\" alt=\"\" class=\"wp-image-42873\" srcset=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/FNN-2-1024x556.png 1024w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/FNN-2-300x163.png 300w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/FNN-2-768x417.png 768w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/FNN-2-1536x833.png 1536w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/FNN-2.png 2031w\" sizes=\"auto, (max-width: 1024px) 100vw, 1024px\" \/><figcaption class=\"wp-element-caption\">Fig. 
1: Feedforward neural network architecture with multiple hidden layers<\/figcaption><\/figure>\n<\/div><\/div>\n\n\n\n<div class=\"wp-block-column is-layout-flow wp-block-column-is-layout-flow\"><div class=\"wp-block-image\">\n<figure class=\"aligncenter size-large\"><img loading=\"lazy\" decoding=\"async\" width=\"1024\" height=\"773\" src=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/RNN_v4-1024x773.png\" alt=\"\" class=\"wp-image-42848\" srcset=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/RNN_v4-1024x773.png 1024w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/RNN_v4-300x227.png 300w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/RNN_v4-768x580.png 768w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/RNN_v4-1536x1160.png 1536w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/RNN_v4.png 1651w\" sizes=\"auto, (max-width: 1024px) 100vw, 1024px\" \/><figcaption class=\"wp-element-caption\">Fig. 2: Recurrent neural network architecture<\/figcaption><\/figure>\n<\/div><\/div>\n<\/div>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_43b2f8-a8, .wp-block-kadence-advancedheading.kt-adv-heading43088_43b2f8-a8[data-kb-block=\"kb-adv-heading43088_43b2f8-a8\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_43b2f8-a8 mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_43b2f8-a8[data-kb-block=\"kb-adv-heading43088_43b2f8-a8\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_43b2f8-a8 img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_43b2f8-a8[data-kb-block=\"kb-adv-heading43088_43b2f8-a8\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h2 class=\"kt-adv-heading43088_43b2f8-a8 
wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_43b2f8-a8\"><span class=\"ez-toc-section\" id=\"MLP-FNN-architecture\"><\/span>MLP-FNN architecture <span class=\"ez-toc-section-end\"><\/span><\/h2>\n\n\n\n<p>An MLP-FNN architecture typically comprises an input layer, one or more hidden layers, and an output layer, each containing one or multiple interconnected neurons, as illustrated in <a href=\"#fig_FNN\" type=\"internal\" id=\"#fig_FNN\">Fig. 1<\/a>. Within each neuron, incoming signals are multiplied by their respective connection weights (\\(w\\)), which represent the strength of the input, summed together, and then shifted by a bias term (\\(b\\)). Finally, the aggregated value is passed through an <a href=\"https:\/\/en.wikipedia.org\/wiki\/Activation_function\" type=\"link\" id=\"https:\/\/en.wikipedia.org\/wiki\/Activation_function\">activation function<\/a> to generate the neuron&#8217;s output signal. This function is a mathematical operation that can include non-linearity, allowing the network to learn and model complex patterns beyond simple linear relationships. 
By iteratively adjusting the weights and biases during the training phase, the FNN encodes the learned relationships, enabling it to generalize accurately across its intended operating region [3].<\/p>\n\n\n\n<p>Important concepts related to ANNs in general, and FNNs in particular, are detailed below:<\/p>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_95fd84-01, .wp-block-kadence-advancedheading.kt-adv-heading43088_95fd84-01[data-kb-block=\"kb-adv-heading43088_95fd84-01\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_95fd84-01 mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_95fd84-01[data-kb-block=\"kb-adv-heading43088_95fd84-01\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_95fd84-01 img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_95fd84-01[data-kb-block=\"kb-adv-heading43088_95fd84-01\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h3 class=\"kt-adv-heading43088_95fd84-01 wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_95fd84-01\"><span class=\"ez-toc-section\" id=\"Mathematical-representation-of-FNN\"><\/span>Mathematical representation of FNN<span class=\"ez-toc-section-end\"><\/span><\/h3>\n\n\n\n<p>Referring to <a href=\"#fig_FNN\">Fig. 
1<\/a>, let the input vector be <math data-latex=\"x=[x_1\u200b\u200b\u22ef\u200bx_m\u200b\u200b]^\u22a4\u2208R^m\"><semantics><mrow><mi>x<\/mi><mo>=<\/mo><mo form=\"prefix\" stretchy=\"false\">[<\/mo><msub><mi>x<\/mi><mn>1<\/mn><\/msub><mtext>\u200b<\/mtext><mtext>\u200b<\/mtext><mo>\u22ef<\/mo><mtext>\u200b<\/mtext><msub><mi>x<\/mi><mi>m<\/mi><\/msub><mtext>\u200b<\/mtext><mtext>\u200b<\/mtext><msup><mo form=\"postfix\" stretchy=\"false\">]<\/mo><mtext>\u22a4<\/mtext><\/msup><mo>\u2208<\/mo><msup><mi>R<\/mi><mi>m<\/mi><\/msup><\/mrow><annotation encoding=\"application\/x-tex\">x=[x_1\u200b\u200b\u22ef\u200bx_m\u200b\u200b]^\u22a4\u2208R^m<\/annotation><\/semantics><\/math>, and the output vector be <math data-latex=\"y=[y_1\u200b\u200b\u22ef\u200by_n\u200b\u200b]^\u22a4\u2208R^n.\"><semantics><mrow><mi>y<\/mi><mo>=<\/mo><mo form=\"prefix\" stretchy=\"false\">[<\/mo><msub><mi>y<\/mi><mn>1<\/mn><\/msub><mtext>\u200b<\/mtext><mtext>\u200b<\/mtext><mo>\u22ef<\/mo><mtext>\u200b<\/mtext><msub><mi>y<\/mi><mi>n<\/mi><\/msub><mtext>\u200b<\/mtext><mtext>\u200b<\/mtext><msup><mo form=\"postfix\" stretchy=\"false\">]<\/mo><mtext>\u22a4<\/mtext><\/msup><mo>\u2208<\/mo><msup><mi>R<\/mi><mi>n<\/mi><\/msup><mi>.<\/mi><\/mrow><annotation encoding=\"application\/x-tex\">y=[y_1\u200b\u200b\u22ef\u200by_n\u200b\u200b]^\u22a4\u2208R^n.<\/annotation><\/semantics><\/math><\/p>\n\n\n\n<p>A typical mathematical expression for the output of neuron <math data-latex=\"j\"><semantics><mi>j<\/mi><annotation encoding=\"application\/x-tex\">j<\/annotation><\/semantics><\/math> in layer <math data-latex=\"l\"><semantics><mi>l<\/mi><annotation encoding=\"application\/x-tex\">l<\/annotation><\/semantics><\/math>:<\/p>\n\n\n\n<div class=\"wp-block-math\"><math display=\"block\"><semantics><mrow><msubsup><mi>y<\/mi><mi>j<\/mi><mrow><mo form=\"prefix\" stretchy=\"false\" lspace=\"0em\" rspace=\"0em\">(<\/mo><mi>l<\/mi><mo form=\"postfix\" stretchy=\"false\" lspace=\"0em\" 
rspace=\"0em\">)<\/mo><\/mrow><\/msubsup><mo>=<\/mo><msup><mi>N<\/mi><mrow><mo form=\"prefix\" stretchy=\"false\" lspace=\"0em\" rspace=\"0em\">(<\/mo><mi>l<\/mi><mo form=\"postfix\" stretchy=\"false\" lspace=\"0em\" rspace=\"0em\">)<\/mo><\/mrow><\/msup><mrow><mo fence=\"true\" form=\"prefix\">(<\/mo><mrow><munderover><mo movablelimits=\"false\">\u2211<\/mo><mrow><mi>i<\/mi><mo>=<\/mo><mn>1<\/mn><\/mrow><msub><mi>n<\/mi><mrow><mi>l<\/mi><mo>\u2212<\/mo><mn>1<\/mn><\/mrow><\/msub><\/munderover><\/mrow><msubsup><mi>w<\/mi><mrow><mi>j<\/mi><mi>i<\/mi><\/mrow><mrow><mo form=\"prefix\" stretchy=\"false\" lspace=\"0em\" rspace=\"0em\">(<\/mo><mi>l<\/mi><mo form=\"postfix\" stretchy=\"false\" lspace=\"0em\" rspace=\"0em\">)<\/mo><\/mrow><\/msubsup><msub><mi>x<\/mi><mi>i<\/mi><\/msub><mo>+<\/mo><msubsup><mi>b<\/mi><mi>j<\/mi><mrow><mo form=\"prefix\" stretchy=\"false\" lspace=\"0em\" rspace=\"0em\">(<\/mo><mi>l<\/mi><mo form=\"postfix\" stretchy=\"false\" lspace=\"0em\" rspace=\"0em\">)<\/mo><\/mrow><\/msubsup><mo fence=\"true\" form=\"postfix\">)<\/mo><\/mrow><\/mrow><annotation encoding=\"application\/x-tex\">y_j^{(l)} = N^{(l)} \\left( \\sum_{i=1}^{n_{l-1}} w_{ji}^{(l)} x_i + b_j^{(l)} \\right)<\/annotation><\/semantics><\/math><\/div>\n\n\n\n<p>where <math data-latex=\"w_{ji}\"><semantics><msub><mi>w<\/mi><mrow><mi>j<\/mi><mi>i<\/mi><\/mrow><\/msub><annotation encoding=\"application\/x-tex\">w_{ji}<\/annotation><\/semantics><\/math> is the weight connecting neuron <math data-latex=\"i\"><semantics><mi>i<\/mi><annotation encoding=\"application\/x-tex\">i<\/annotation><\/semantics><\/math> in layer <math data-latex=\"l-1\"><semantics><mrow><mi>l<\/mi><mo>\u2212<\/mo><mn>1<\/mn><\/mrow><annotation encoding=\"application\/x-tex\">l-1<\/annotation><\/semantics><\/math> to neuron <math data-latex=\"j\"><semantics><mi>j<\/mi><annotation encoding=\"application\/x-tex\">j<\/annotation><\/semantics><\/math> in layer <math data-latex=\"l\"><semantics><mi>l<\/mi><annotation 
encoding=\"application\/x-tex\">l<\/annotation><\/semantics><\/math>, <math data-latex=\"b_j\"><semantics><msub><mi>b<\/mi><mi>j<\/mi><\/msub><annotation encoding=\"application\/x-tex\">b_j<\/annotation><\/semantics><\/math> is the bias term, <math data-latex=\"n_{l-1}\"><semantics><msub><mi>n<\/mi><mrow><mi>l<\/mi><mo>\u2212<\/mo><mn>1<\/mn><\/mrow><\/msub><annotation encoding=\"application\/x-tex\">n_{l-1}<\/annotation><\/semantics><\/math> is the number of neurons in the preceding layer, and <math data-latex=\"N^{(l)}\"><semantics><msup><mi>N<\/mi><mrow><mo form=\"prefix\" stretchy=\"false\" lspace=\"0em\" rspace=\"0em\">(<\/mo><mi>l<\/mi><mo form=\"postfix\" stretchy=\"false\" lspace=\"0em\" rspace=\"0em\">)<\/mo><\/mrow><\/msup><annotation encoding=\"application\/x-tex\">N^{(l)}<\/annotation><\/semantics><\/math>( \u22c5 ) is the activation function of layer <math data-latex=\"l\"><semantics><mi>l<\/mi><annotation encoding=\"application\/x-tex\">l<\/annotation><\/semantics><\/math>.<\/p>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_3c020c-51, .wp-block-kadence-advancedheading.kt-adv-heading43088_3c020c-51[data-kb-block=\"kb-adv-heading43088_3c020c-51\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_3c020c-51 mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_3c020c-51[data-kb-block=\"kb-adv-heading43088_3c020c-51\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_3c020c-51 img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_3c020c-51[data-kb-block=\"kb-adv-heading43088_3c020c-51\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h3 id=\"activation_function\" 
class=\"kt-adv-heading43088_3c020c-51 wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_3c020c-51\"><span class=\"ez-toc-section\" id=\"Activation-functions\"><\/span>Activation functions<span class=\"ez-toc-section-end\"><\/span><\/h3>\n\n\n\n<p>To enable the network to learn complex non-linear mappings, the neurons\u00a0must utilize non-linear\u00a0activation functions. For the network to be compatible with standard network training algorithms, these activation functions must be differentiable or piecewise differentiable. Among the various options available, the Rectified Linear Unit (ReLU) is the most commonly used due to its computational efficiency and effectiveness in mitigating gradient-related issues during training [4]. Fig. 3 illustrates\u00a0some important activation functions commonly used in ANNs.<\/p>\n\n\n<style>.kb-image43088_591c3e-ab .kb-image-has-overlay:after{opacity:0.3;}<\/style>\n<div class=\"wp-block-kadence-image kb-image43088_591c3e-ab\" id=\"figure3\"><figure class=\"aligncenter size-full\"><img loading=\"lazy\" decoding=\"async\" width=\"900\" height=\"400\" src=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/common_activation_fuction_overlapped2.png\" alt=\"\" class=\"kb-img wp-image-43193\" srcset=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/common_activation_fuction_overlapped2.png 900w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/common_activation_fuction_overlapped2-300x133.png 300w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/common_activation_fuction_overlapped2-768x341.png 768w\" sizes=\"auto, (max-width: 900px) 100vw, 900px\" \/><figcaption>Fig. 
3: Commonly applied activation functions.<\/figcaption><\/figure><\/div>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_bb7bab-6c, .wp-block-kadence-advancedheading.kt-adv-heading43088_bb7bab-6c[data-kb-block=\"kb-adv-heading43088_bb7bab-6c\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_bb7bab-6c mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_bb7bab-6c[data-kb-block=\"kb-adv-heading43088_bb7bab-6c\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_bb7bab-6c img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_bb7bab-6c[data-kb-block=\"kb-adv-heading43088_bb7bab-6c\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h3 class=\"kt-adv-heading43088_bb7bab-6c wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_bb7bab-6c\"><span class=\"ez-toc-section\" id=\"Network-topology-and-design-flexibility\"><\/span><strong>Network topology and design flexibility<\/strong><span class=\"ez-toc-section-end\"><\/span><\/h3>\n\n\n\n<p>FNN offers a significant degree of design flexibility. While the dimensionality of the input and output layers is typically determined by the physical system, the internal structure is largely user-defined. Key design choices include the number of hidden layers (depth), the number of neurons per layer (width), and the connectivity scheme (e.g., all-to-all vs. sparsified connections). Specifically, increasing the network&#8217;s depth enables it to learn highly complex, hierarchical abstractions of the data, while expanding its width allows it to capture a broader array of features at each processing stage. 
However, a model with excessive depth or width risks <a href=\"https:\/\/en.wikipedia.org\/wiki\/Overfitting\" type=\"link\" id=\"https:\/\/en.wikipedia.org\/wiki\/Overfitting\">overfitting<\/a> and needlessly inflating the execution time. Ultimately, this architectural freedom allows the user to carefully balance model learning capacity against the strict computational constraints of the physical application [4].<\/p>\n\n\n\n<div class=\"wp-block-simple-alerts-for-gutenberg-alert-boxes sab-alert sab-alert-info\" role=\"alert\">Overfitting occurs when a model learns the specific details and noise of the training data rather than the underlying general pattern. This results in high accuracy on the training set but poor performance when predicting new, unseen data.<\/div>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_001316-62, .wp-block-kadence-advancedheading.kt-adv-heading43088_001316-62[data-kb-block=\"kb-adv-heading43088_001316-62\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_001316-62 mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_001316-62[data-kb-block=\"kb-adv-heading43088_001316-62\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_001316-62 img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_001316-62[data-kb-block=\"kb-adv-heading43088_001316-62\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h3 class=\"kt-adv-heading43088_001316-62 wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_001316-62\"><span class=\"ez-toc-section\" id=\"Training-algorithms\"><\/span>Training algorithms<span class=\"ez-toc-section-end\"><\/span><\/h3>\n\n\n\n<p>The training algorithm used to calculate the network\u2019s weight matrix depends on the 
architecture&#8217;s complexity. For simple, single-layer networks with linear input-output relationships, analytical methods such as the Pseudo-inverse or LASSO can be used for the direct optimization of the weights and biases. However, for multi-layer ANNs utilizing non-linear activation functions, iterative optimization methods are required.<\/p>\n\n\n\n<p>Central to this iterative process is the loss function, which quantifies the error between the model&#8217;s predictions and the actual target values. Its primary function is to guide the optimization process, which iteratively adjusts the network&#8217;s weights and biases to minimize the error and improve the model&#8217;s accuracy.<\/p>\n\n\n\n<p>The process of minimizing the loss in MLP-FNNs relies on two complementary phases: calculating the error gradients and updating the network weights and biases:<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><strong>Backpropagation<\/strong> is a computational algorithm that calculates the gradients of the loss function with respect to each weight by propagating the error information backward through the layers using the <a href=\"https:\/\/en.wikipedia.org\/wiki\/Chain_rule\" type=\"link\" id=\"https:\/\/en.wikipedia.org\/wiki\/Chain_rule\">chain rule<\/a>.<\/li>\n\n\n\n<li><strong>Optimization algorithms<\/strong> use those calculated gradients to update the network parameters. While <a href=\"https:\/\/en.wikipedia.org\/wiki\/Stochastic_gradient_descent\" type=\"link\" id=\"https:\/\/en.wikipedia.org\/wiki\/Stochastic_gradient_descent\">Stochastic Gradient Descent<\/a> is a common first-order optimizer that takes steps in the direction of the steepest descent, second-order methods like the <a href=\"https:\/\/en.wikipedia.org\/wiki\/Levenberg%E2%80%93Marquardt_algorithm\" type=\"link\" id=\"https:\/\/en.wikipedia.org\/wiki\/Levenberg%E2%80%93Marquardt_algorithm\">Levenberg-Marquardt<\/a> algorithm are also widely used. 
Levenberg-Marquardt dynamically blends gradient descent with Gauss-Newton optimization, allowing for much faster convergence on moderate-sized problems.<\/li>\n<\/ul>\n\n\n\n<p>Together, these mechanisms enable the network to learn from data. However, the speed and stability of this convergence are heavily influenced by the chosen <a href=\"#hyperparameters\">hyperparameters<\/a>. These hyperparameters are external configuration variables set&nbsp;before&nbsp;the training process begins, as opposed to parameters (weights and biases), which are learned during training. They govern the network&#8217;s structure (e.g., number of layers, number of neurons) and the learning process (e.g., learning rate, batch size) to optimize performance.<\/p>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_686ce8-86, .wp-block-kadence-advancedheading.kt-adv-heading43088_686ce8-86[data-kb-block=\"kb-adv-heading43088_686ce8-86\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_686ce8-86 mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_686ce8-86[data-kb-block=\"kb-adv-heading43088_686ce8-86\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_686ce8-86 img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_686ce8-86[data-kb-block=\"kb-adv-heading43088_686ce8-86\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h3 class=\"kt-adv-heading43088_686ce8-86 wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_686ce8-86\"><span class=\"ez-toc-section\" id=\"Data-partitioning-and-validation\"><\/span><strong>Data partitioning and validation<\/strong><span class=\"ez-toc-section-end\"><\/span><\/h3>\n\n\n\n<p>To ensure the model generalizes effectively to unseen operating conditions, the 
dataset is typically partitioned into three distinct subsets: <strong>training<\/strong>, <strong>validation<\/strong>, and <strong>testing<\/strong>. The training data is used directly by the algorithm to update the network&#8217;s weights and biases. While hyperparameters must be fixed before a training run begins, the validation data is used to evaluate the model&#8217;s performance under those specific settings, allowing the designer to iteratively tune the configuration across multiple independent runs. Finally, the test set provides a completely independent, unbiased evaluation of the final model to confirm its true performance on strictly unseen data.<\/p>\n\n\n\n<p>During optimization, the training algorithm iterates through the data over several epochs, continuously adjusting the weights and biases to minimize the prediction error. This training process typically continues until the error on the validation set reaches a threshold and begins to increase. Halting the process at this exact point, a technique known as <a href=\"https:\/\/en.wikipedia.org\/wiki\/Early_stopping\" type=\"link\" id=\"https:\/\/en.wikipedia.org\/wiki\/Early_stopping\">early stopping<\/a>, is crucial to prevent the network from overfitting, thereby ensuring robust performance when deployed [4].<\/p>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_4da170-46, .wp-block-kadence-advancedheading.kt-adv-heading43088_4da170-46[data-kb-block=\"kb-adv-heading43088_4da170-46\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_4da170-46 mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_4da170-46[data-kb-block=\"kb-adv-heading43088_4da170-46\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_4da170-46 img.kb-inline-image, 
.wp-block-kadence-advancedheading.kt-adv-heading43088_4da170-46[data-kb-block=\"kb-adv-heading43088_4da170-46\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h2 class=\"kt-adv-heading43088_4da170-46 wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_4da170-46\"><span class=\"ez-toc-section\" id=\"Challenges-of-ANN-based-control\"><\/span>Challenges of ANN-based control<span class=\"ez-toc-section-end\"><\/span><\/h2>\n\n\n\n<p>Implementing ANN-based control in converter applications presents several specific challenges:<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><strong>Data dependencies:<\/strong>&nbsp;The controller&#8217;s performance is intrinsically tied to the quality and breadth of the training dataset. Obtaining large, well-balanced datasets that cover the entire operating region, including rare events like faults, is often costly and time-consuming. Underrepresentation of these edge cases can lead to poor generalization.<\/li>\n\n\n\n<li><strong>Design trade-offs and overfitting:<\/strong>&nbsp;Selecting the network architecture requires balancing accuracy against robustness. While increasing network depth and width enhances representational capacity, it raises computational costs and increases the risk of overfitting (learning noise rather than system dynamics). <a href=\"https:\/\/en.wikipedia.org\/wiki\/Cross-validation_(statistics)\" type=\"link\" id=\"https:\/\/en.wikipedia.org\/wiki\/Cross-validation_(statistics)\">Cross-validation<\/a> is essential to mitigate this risk [4].<\/li>\n\n\n\n<li><strong>Verification and interpretability:<\/strong>&nbsp;Unlike standard linear controllers, ANNs often function as &#8220;black boxes&#8221; lacking analytical stability proofs. 
Consequently, safety-critical deployment typically requires auxiliary safety mechanisms, such as output saturations, runtime monitors, and fallback logic.<\/li>\n<\/ul>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_b076d5-6e, .wp-block-kadence-advancedheading.kt-adv-heading43088_b076d5-6e[data-kb-block=\"kb-adv-heading43088_b076d5-6e\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_b076d5-6e mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_b076d5-6e[data-kb-block=\"kb-adv-heading43088_b076d5-6e\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_b076d5-6e img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_b076d5-6e[data-kb-block=\"kb-adv-heading43088_b076d5-6e\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h2 class=\"kt-adv-heading43088_b076d5-6e wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_b076d5-6e\"><span class=\"ez-toc-section\" id=\"Experimental-implementation-of-an-ANN-based-control\"><\/span>Experimental implementation of an ANN-based control<span class=\"ez-toc-section-end\"><\/span><\/h2>\n\n\n\n<p>As an application example, an indirect MLP-FNN is implemented to regulate the output current of a three-phase grid-following converter. This example replaces the inner-loop PI current regulator with an offline-trained MLP-FNN. The deployment of the indirect FNN is performed at the controller interface, as shown in Fig. 4. 
The PLL synchronization, modulation (<a href=\"https:\/\/imperix.com\/doc\/implementation\/space-vector-modulation\" type=\"link\" id=\"https:\/\/imperix.com\/doc\/implementation\/space-vector-modulation\">SVM<\/a>), and protections remain unchanged, while the FNN reproduces the PI controller\u2019s voltage-reference generation behavior.<\/p>\n\n\n<style>.kb-image43088_43f9f4-e8 .kb-image-has-overlay:after{opacity:0.3;}<\/style>\n<div class=\"wp-block-kadence-image kb-image43088_43f9f4-e8\"><figure class=\"aligncenter size-full\"><img loading=\"lazy\" decoding=\"async\" width=\"1002\" height=\"547\" src=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/01\/FNN-based-controller2.png\" alt=\"\" class=\"kb-img wp-image-41368\" srcset=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/01\/FNN-based-controller2.png 1002w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/01\/FNN-based-controller2-300x164.png 300w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/01\/FNN-based-controller2-768x419.png 768w\" sizes=\"auto, (max-width: 1002px) 100vw, 1002px\" \/><figcaption>Fig. 
4: Inputs and outputs of the FNN-based controller together with the standard vector current control scheme<\/figcaption><\/figure><\/div>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_3cc28e-cc, .wp-block-kadence-advancedheading.kt-adv-heading43088_3cc28e-cc[data-kb-block=\"kb-adv-heading43088_3cc28e-cc\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_3cc28e-cc mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_3cc28e-cc[data-kb-block=\"kb-adv-heading43088_3cc28e-cc\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_3cc28e-cc img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_3cc28e-cc[data-kb-block=\"kb-adv-heading43088_3cc28e-cc\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h3 class=\"kt-adv-heading43088_3cc28e-cc wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_3cc28e-cc\"><span class=\"ez-toc-section\" id=\"Structure-of-FNN\"><\/span>Structure of FNN<span class=\"ez-toc-section-end\"><\/span><\/h3>\n\n\n\n<p>Instead of learning the full converter dynamics, the FNN is trained to emulate the mapping performed by the discrete PI current controller. At each control step&nbsp;<math xmlns=\"http:\/\/www.w3.org\/1998\/Math\/MathML\"><semantics><mrow><mi>k<\/mi><\/mrow><\/semantics><\/math>, the FNN receives a set of controller-relevant input signals, and it outputs the corresponding&nbsp;voltage reference in dq coordinates. 
The input vector is constructed such that it captures the useful information that could be helpful for FNN in mapping the input vector to the output vector.<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li><strong>Inputs (5 inputs):<\/strong> <math data-latex=\"x(k)=[\\sum{K_i e_d(k)}, \\sum{K_i e_q\u200b(k)},e_d (k),e _q (k), \u03c9(k)]\"><semantics><mrow><mi>x<\/mi><mo form=\"prefix\" stretchy=\"false\">(<\/mo><mi>k<\/mi><mo form=\"postfix\" stretchy=\"false\">)<\/mo><mo>=<\/mo><mo form=\"prefix\" stretchy=\"false\">[<\/mo><mo movablelimits=\"false\">\u2211<\/mo><mrow><msub><mi>K<\/mi><mi>i<\/mi><\/msub><msub><mi>e<\/mi><mi>d<\/mi><\/msub><mo form=\"prefix\" stretchy=\"false\">(<\/mo><mi>k<\/mi><mo form=\"postfix\" stretchy=\"false\" lspace=\"0em\" rspace=\"0em\">)<\/mo><\/mrow><mo separator=\"true\">,<\/mo><mo movablelimits=\"false\">\u2211<\/mo><mrow><msub><mi>K<\/mi><mi>i<\/mi><\/msub><msub><mi>e<\/mi><mi>q<\/mi><\/msub><mtext>\u200b<\/mtext><mo form=\"prefix\" stretchy=\"false\">(<\/mo><mi>k<\/mi><mo form=\"postfix\" stretchy=\"false\" lspace=\"0em\" rspace=\"0em\">)<\/mo><\/mrow><mo separator=\"true\">,<\/mo><msub><mi>e<\/mi><mi>d<\/mi><\/msub><mo form=\"prefix\" stretchy=\"false\">(<\/mo><mi>k<\/mi><mo form=\"postfix\" stretchy=\"false\">)<\/mo><mo separator=\"true\">,<\/mo><msub><mi>e<\/mi><mi>q<\/mi><\/msub><mo form=\"prefix\" stretchy=\"false\">(<\/mo><mi>k<\/mi><mo form=\"postfix\" stretchy=\"false\">)<\/mo><mo separator=\"true\">,<\/mo><mi>\u03c9<\/mi><mo form=\"prefix\" stretchy=\"false\">(<\/mo><mi>k<\/mi><mo form=\"postfix\" stretchy=\"false\">)<\/mo><mo form=\"postfix\" stretchy=\"false\">]<\/mo><\/mrow><annotation encoding=\"application\/x-tex\">x(k)=[\\sum{K_i e_d(k)}, \\sum{K_i e_q\u200b(k)},e_d (k),e _q (k), \u03c9(k)]<\/annotation><\/semantics><\/math>, where the first two terms are the PI integrator states, <math data-latex=\"e _d \u200b  (k),e _q \u200b  (k)\"><semantics><mrow><msub><mi>e<\/mi><mi>d<\/mi><\/msub><mtext>\u200b<\/mtext><mo 
form=\"prefix\" stretchy=\"false\">(<\/mo><mi>k<\/mi><mo form=\"postfix\" stretchy=\"false\">)<\/mo><mo separator=\"true\">,<\/mo><msub><mi>e<\/mi><mi>q<\/mi><\/msub><mtext>\u200b<\/mtext><mo form=\"prefix\" stretchy=\"false\">(<\/mo><mi>k<\/mi><mo form=\"postfix\" stretchy=\"false\">)<\/mo><\/mrow><annotation encoding=\"application\/x-tex\">e _d \u200b (k),e _q \u200b (k)<\/annotation><\/semantics><\/math> are the error terms used by the discrete PI controller, and <math data-latex=\"\\omega(k)\"><semantics><mrow><mi>\u03c9<\/mi><mo form=\"prefix\" stretchy=\"false\">(<\/mo><mi>k<\/mi><mo form=\"postfix\" stretchy=\"false\">)<\/mo><\/mrow><annotation encoding=\"application\/x-tex\">\\omega(k)<\/annotation><\/semantics><\/math> is the measured grid angular frequency.<\/li>\n\n\n\n<li><strong>Hidden layers:&nbsp;<\/strong>two layers, containing 32 and 16&nbsp;neurons, respectively.<\/li>\n\n\n\n<li><strong>Outputs (2 regression outputs):<\/strong> <math data-latex=\"y(k)=[E_{g,d} \u200b  (k),E_{g,q} \u200b  (k)]\"><semantics><mrow><mi>y<\/mi><mo form=\"prefix\" stretchy=\"false\">(<\/mo><mi>k<\/mi><mo form=\"postfix\" stretchy=\"false\">)<\/mo><mo>=<\/mo><mo form=\"prefix\" stretchy=\"false\">[<\/mo><msub><mi>E<\/mi><mrow><mi>g<\/mi><mo separator=\"true\">,<\/mo><mi>d<\/mi><\/mrow><\/msub><mtext>\u200b<\/mtext><mo form=\"prefix\" stretchy=\"false\">(<\/mo><mi>k<\/mi><mo form=\"postfix\" stretchy=\"false\">)<\/mo><mo separator=\"true\">,<\/mo><msub><mi>E<\/mi><mrow><mi>g<\/mi><mo separator=\"true\">,<\/mo><mi>q<\/mi><\/mrow><\/msub><mtext>\u200b<\/mtext><mo form=\"prefix\" stretchy=\"false\">(<\/mo><mi>k<\/mi><mo form=\"postfix\" stretchy=\"false\">)<\/mo><mo form=\"postfix\" stretchy=\"false\">]<\/mo><\/mrow><annotation encoding=\"application\/x-tex\">y(k)=[E_{g,d} \u200b (k),E_{g,q} \u200b (k)]<\/annotation><\/semantics><\/math>. 
<\/li>\n\n\n\n<li><strong>Activation function:<\/strong> tansig is used in hidden layers and pure linear is used in the output layer (see <a href=\"#figure3\" type=\"internal\" id=\"#figure3\">Fig. 3<\/a>).<\/li>\n<\/ul>\n\n\n\n<p>This structure ensures that the FNN can act as a&nbsp;replacement&nbsp;for the PI controller. It receives the same internal control signals as a discrete PI controller would receive, and produces the same voltage reference quantities.<\/p>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_f37411-26, .wp-block-kadence-advancedheading.kt-adv-heading43088_f37411-26[data-kb-block=\"kb-adv-heading43088_f37411-26\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_f37411-26 mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_f37411-26[data-kb-block=\"kb-adv-heading43088_f37411-26\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_f37411-26 img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_f37411-26[data-kb-block=\"kb-adv-heading43088_f37411-26\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h3 class=\"kt-adv-heading43088_f37411-26 wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_f37411-26\"><span class=\"ez-toc-section\" id=\"Data-collection-and-preprocessing\"><\/span>Data collection and preprocessing<span class=\"ez-toc-section-end\"><\/span><\/h3>\n\n\n\n<p>Training data is generated using a <a href=\"https:\/\/imperix.com\/doc\/implementation\/vector-current-control?currentThread=static-synchronous-compensator-statcom\" type=\"link\" id=\"https:\/\/imperix.com\/doc\/implementation\/vector-current-control?currentThread=static-synchronous-compensator-statcom\">vector current control<\/a> scheme by simulating the grid-following 
converter in Simulink across a range of operating conditions and reference variations, e.g., different profiles for&nbsp;<math xmlns=\"http:\/\/www.w3.org\/1998\/Math\/MathML\"><semantics><mrow><msubsup><mi>i<\/mi><mi>d<\/mi><mrow><mi>r<\/mi><mi>e<\/mi><mi>f<\/mi><\/mrow><\/msubsup><\/mrow><\/semantics><\/math>and&nbsp;<math xmlns=\"http:\/\/www.w3.org\/1998\/Math\/MathML\"><semantics><mrow><msubsup><mi>i<\/mi><mi>q<\/mi><mrow><mi>r<\/mi><mi>e<\/mi><mi>f<\/mi><\/mrow><\/msubsup><\/mrow><\/semantics><\/math>, different DC-link voltage, and grid voltage magnitudes. Training data should be gathered by manipulating the variables in such a way as to cover the desired solution space. For each simulation run, signals are logged&nbsp;and consolidated into a&nbsp;measurements array.<\/p>\n\n\n\n<div class=\"wp-block-simple-alerts-for-gutenberg-alert-boxes sab-alert sab-alert-info\" role=\"alert\">The number of elements in each sample and the sampling rate should be consistent throughout the training dataset.<\/div>\n\n\n\n<p>All experiments are concatenated into a single supervised dataset by stacking time samples from each run. In this case, the total number of samples in the training dataset is 345,632. After that, invalid samples are removed. This step is important when concatenating logs from many simulation runs, especially when protections may cause undefined values during transients or failed runs. To stabilize training and keep signals in comparable numeric ranges, fixed scaling factors are applied. 
This normalization is part of the controller definition: the&nbsp;same scaling must be reproduced in Simulink at inference time, and FNN outputs must be de-scaled back before being used.<\/p>\n\n\n\n<div class=\"wp-block-simple-alerts-for-gutenberg-alert-boxes sab-alert sab-alert-warning\" role=\"alert\">Any dimension of the input vector that is not important must be removed so that the FNN doesn&#8217;t have to learn features that are not important or play a very small role.<\/div>\n\n\n\n<h3 class=\"wp-block-heading\"><span class=\"ez-toc-section\" id=\"Training-and-deployment\"><\/span>Training and deployment<span class=\"ez-toc-section-end\"><\/span><\/h3>\n\n\n\n<p>The <a href=\"https:\/\/en.wikipedia.org\/wiki\/Levenberg%E2%80%93Marquardt_algorithm\" type=\"link\" id=\"https:\/\/en.wikipedia.org\/wiki\/Levenberg%E2%80%93Marquardt_algorithm\">Levenberg-Marquardt<\/a> optimization algorithm is selected to train the FNN, as it is effective when the network is moderate in size, and it also converges relatively fast for small to moderate multi-layer perceptron regression problems. Additionally, Mean Squared Error (MSE) is chosen as the loss function. Some of the key implemented hyperparameters are:<\/p>\n\n\n\n<ul id=\"hyperparameters\" class=\"wp-block-list\">\n<li><strong>Maximum epochs (2000):<\/strong> The absolute limit on training iterations if the network does not reach convergence earlier. <\/li>\n\n\n\n<li><strong>Performance goal (1e-7):<\/strong> The target MSE; training halts successfully if the loss falls below this threshold.<\/li>\n\n\n\n<li><strong>Initial learning rate (1e-3):<\/strong> The starting step size used by the optimizer to update the network weights.<\/li>\n\n\n\n<li><strong>Minimum gradient (1e-8):<\/strong> The threshold at which weight updates become numerically insignificant. 
If the gradient drops below this value, the optimizer has reached a flat region and training stops.<\/li>\n\n\n\n<li><strong>Validation early stopping (<code>max_fail = 20<\/code>):<\/strong> The number of consecutive epochs the validation loss is permitted to worsen before training is terminated. This prevents overfitting while allowing for typical, temporary fluctuations in the loss curve.<\/li>\n<\/ul>\n\n\n\n<p>The data is divided into a standard <strong>70\/15\/15<\/strong> split (training\/validation\/test) to ensure robust learning and unbiased performance evaluation. Training yields a final network with optimized weights and biases. For deployment, the trained model can be integrated into Simulink by generating a callable MATLAB function&nbsp;with&nbsp;genFunction().<\/p>\n\n\n\n<div class=\"wp-block-simple-alerts-for-gutenberg-alert-boxes sab-alert sab-alert-warning\" role=\"alert\">An FNN is considered well-trained if it achieves low error on validation or test data (not only on the training set) and shows no large outliers or unstable jumps in its output over the intended operating range. A low training loss with a higher validation loss indicates overfitting. 
After checking the performance of the controller in simulation, the neural network architecture and training parameters can be iterated to improve the controller&#8217;s performance and find the best model.<\/div>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_476433-e6, .wp-block-kadence-advancedheading.kt-adv-heading43088_476433-e6[data-kb-block=\"kb-adv-heading43088_476433-e6\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_476433-e6 mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_476433-e6[data-kb-block=\"kb-adv-heading43088_476433-e6\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_476433-e6 img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_476433-e6[data-kb-block=\"kb-adv-heading43088_476433-e6\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h3 class=\"kt-adv-heading43088_476433-e6 wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_476433-e6\"><span class=\"ez-toc-section\" id=\"Tips-to-improve-the-ANN-performance\"><\/span>Tips to improve the ANN performance<span class=\"ez-toc-section-end\"><\/span><\/h3>\n\n\n\n<ul class=\"wp-block-list\">\n<li>It is recommended to excite the system with reference steps and parameter variations that span the full expected operating condition (grid strength, <math xmlns=\"http:\/\/www.w3.org\/1998\/Math\/MathML\"><semantics><mrow><msub><mi>V<\/mi><mrow><mi>d<\/mi><mi>c<\/mi><\/mrow><\/msub><\/mrow><annotation encoding=\"application\/x-tex\">V_{dc}<\/annotation><\/semantics><\/math>\u200b , <math xmlns=\"http:\/\/www.w3.org\/1998\/Math\/MathML\"><semantics><mrow><msub><mi>V<\/mi><mi>g<\/mi><\/msub><\/mrow><annotation encoding=\"application\/x-tex\">V_g<\/annotation><\/semantics><\/math>\u200b, 
frequency drift, etc.). It is important to include the transients and not only the steady state.<\/li>\n\n\n\n<li>Use fixed scaling (per-unit or rated values). Try to analyze the data before training. Look for extreme outliers and remove them before scaling to avoid training dominated by rare events or spikes.<\/li>\n\n\n\n<li>Choose an input vector carefully that reflects control causality and avoid inputs that add noise without information.<\/li>\n\n\n\n<li>Increasing the number of fully connected layers and the number of neurons might increase the model&#8217;s performance, but it also increases the chances of overfitting.<\/li>\n\n\n\n<li>During training, try to monitor your validation loss. If it stops improving for a set number of epochs, training can be manually stopped to avoid overfitting the training set.<\/li>\n\n\n\n<li>If your loss isn&#8217;t moving at all, check your learning rate first. It\u2019s usually either way too high, causing exploding gradients, or too low, making progress invisible.<\/li>\n<\/ul>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_a2e1cc-62, .wp-block-kadence-advancedheading.kt-adv-heading43088_a2e1cc-62[data-kb-block=\"kb-adv-heading43088_a2e1cc-62\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_a2e1cc-62 mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_a2e1cc-62[data-kb-block=\"kb-adv-heading43088_a2e1cc-62\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_a2e1cc-62 img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_a2e1cc-62[data-kb-block=\"kb-adv-heading43088_a2e1cc-62\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h2 class=\"kt-adv-heading43088_a2e1cc-62 wp-block-kadence-advancedheading\" 
data-kb-block=\"kb-adv-heading43088_a2e1cc-62\"><span class=\"ez-toc-section\" id=\"Implementation-of-FNN-based-controller\"><\/span>Implementation of FNN-based controller<span class=\"ez-toc-section-end\"><\/span><\/h2>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_9b4657-8f, .wp-block-kadence-advancedheading.kt-adv-heading43088_9b4657-8f[data-kb-block=\"kb-adv-heading43088_9b4657-8f\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_9b4657-8f mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_9b4657-8f[data-kb-block=\"kb-adv-heading43088_9b4657-8f\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_9b4657-8f img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_9b4657-8f[data-kb-block=\"kb-adv-heading43088_9b4657-8f\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h3 class=\"kt-adv-heading43088_9b4657-8f wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_9b4657-8f\"><span class=\"ez-toc-section\" id=\"Experimental-setup\"><\/span>Experimental setup<span class=\"ez-toc-section-end\"><\/span><\/h3>\n\n\n\n<p>The setup (see Fig. 
5) used to validate the proposed ANN-based control of the output current of the converter includes the following imperix products:<\/p>\n\n\n\n<div class=\"wp-block-columns is-layout-flex wp-container-core-columns-is-layout-9d6595d7 wp-block-columns-is-layout-flex\">\n<div class=\"wp-block-column is-layout-flow wp-block-column-is-layout-flow\">\n<ul class=\"wp-block-list\">\n<li><a href=\"https:\/\/imperix.com\/products\/power\/programmable-inverter\/\">TPI8032<\/a>, 22kW all-in-one programmable inverter<\/li>\n\n\n\n<li><a href=\"https:\/\/imperix.com\/software\/acg-sdk\" target=\"_blank\" rel=\"noreferrer noopener\">ACG SDK toolbox<\/a>&nbsp;for automated generation of the controller code from Simulink<\/li>\n<\/ul>\n\n\n\n<p>and additional components :<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li>1x DC power supply<\/li>\n\n\n\n<li>Three-phase grid connection or three-phase power amplifier<\/li>\n<\/ul>\n<\/div>\n\n\n\n<div class=\"wp-block-column is-layout-flow wp-block-column-is-layout-flow\"><style>.kb-image43088_aaeff1-c2.kb-image-is-ratio-size, .kb-image43088_aaeff1-c2 .kb-image-is-ratio-size{max-width:278px;width:100%;}.wp-block-kadence-column > .kt-inside-inner-col > .kb-image43088_aaeff1-c2.kb-image-is-ratio-size, .wp-block-kadence-column > .kt-inside-inner-col > .kb-image43088_aaeff1-c2 .kb-image-is-ratio-size{align-self:unset;}.kb-image43088_aaeff1-c2 figure{max-width:278px;}.kb-image43088_aaeff1-c2 .image-is-svg, .kb-image43088_aaeff1-c2 .image-is-svg img{width:100%;}.kb-image43088_aaeff1-c2 .kb-image-has-overlay:after{opacity:0.3;}<\/style>\n<div class=\"wp-block-kadence-image kb-image43088_aaeff1-c2\"><figure class=\"aligncenter size-full\"><img loading=\"lazy\" decoding=\"async\" width=\"726\" height=\"1024\" src=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2025\/10\/preview.jpg\" alt=\"\" class=\"kb-img wp-image-35039\" srcset=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2025\/10\/preview.jpg 726w, 
https:\/\/imperix.com\/doc\/wp-content\/uploads\/2025\/10\/preview-213x300.jpg 213w\" sizes=\"auto, (max-width: 726px) 100vw, 726px\" \/><figcaption>Fig. 5: Test setup used for experiments <\/figcaption><\/figure><\/div>\n<\/div>\n<\/div>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_afa5d1-93, .wp-block-kadence-advancedheading.kt-adv-heading43088_afa5d1-93[data-kb-block=\"kb-adv-heading43088_afa5d1-93\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_afa5d1-93 mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_afa5d1-93[data-kb-block=\"kb-adv-heading43088_afa5d1-93\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_afa5d1-93 img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_afa5d1-93[data-kb-block=\"kb-adv-heading43088_afa5d1-93\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h3 class=\"kt-adv-heading43088_afa5d1-93 wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_afa5d1-93\"><span class=\"ez-toc-section\" id=\"Downloads\"><\/span>Downloads<span class=\"ez-toc-section-end\"><\/span><\/h3>\n\n\n\n<p>The full Simulink model, as shown in Fig. 
6, with a trained neural network whose parameters are saved as <code>nn_parameters.mat<\/code>, is available for download using the link below:<\/p>\n\n\n\n<div class=\"wp-block-file\"><a id=\"wp-block-file--media-7505adea-37e0-4d2b-aace-dd87367bb9ee\" href=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/TN179_Data_driven_control_Simulink.zip\">TN179_Data_driven_control_Simulink<\/a><a href=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/TN179_Data_driven_control_Simulink.zip\" class=\"wp-block-file__button wp-element-button\" download aria-describedby=\"wp-block-file--media-7505adea-37e0-4d2b-aace-dd87367bb9ee\">Download<\/a><\/div>\n\n\n<div class=\"wp-block-image\">\n<figure class=\"aligncenter size-large\"><img loading=\"lazy\" decoding=\"async\" width=\"1024\" height=\"479\" src=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/01\/image-13-1024x479.png\" alt=\"\" class=\"wp-image-41045\" srcset=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/01\/image-13-1024x479.png 1024w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/01\/image-13-300x140.png 300w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/01\/image-13-768x359.png 768w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/01\/image-13-1536x719.png 1536w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/01\/image-13.png 1601w\" sizes=\"auto, (max-width: 1024px) 100vw, 1024px\" \/><figcaption class=\"wp-element-caption\">Fig. 
6: Implementation of data-driven control in Simulink<\/figcaption><\/figure>\n<\/div>\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_0f08ad-32, .wp-block-kadence-advancedheading.kt-adv-heading43088_0f08ad-32[data-kb-block=\"kb-adv-heading43088_0f08ad-32\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_0f08ad-32 mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_0f08ad-32[data-kb-block=\"kb-adv-heading43088_0f08ad-32\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_0f08ad-32 img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_0f08ad-32[data-kb-block=\"kb-adv-heading43088_0f08ad-32\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h3 class=\"kt-adv-heading43088_0f08ad-32 wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_0f08ad-32\"><span class=\"ez-toc-section\" id=\"Experimental-results\"><\/span>Experimental results<span class=\"ez-toc-section-end\"><\/span><\/h3>\n\n\n\n<p>To experimentally validate the performance of the FNN-based controller, it is implemented side-by-side with the vector current controller in the Simulink model, as shown in Fig. 
7.<\/p>\n\n\n<div class=\"wp-block-image\">\n<figure class=\"aligncenter size-large\"><img loading=\"lazy\" decoding=\"async\" width=\"1024\" height=\"570\" src=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/01\/image-16-1024x570.png\" alt=\"\" class=\"wp-image-41395\" srcset=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/01\/image-16-1024x570.png 1024w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/01\/image-16-300x167.png 300w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/01\/image-16-768x428.png 768w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/01\/image-16.png 1122w\" sizes=\"auto, (max-width: 1024px) 100vw, 1024px\" \/><figcaption class=\"wp-element-caption\"><br>Fig. 7: FNN-based controller implementation in Simulink<\/figcaption><\/figure>\n<\/div>\n\n\n<p>The experimental performance of the TPI8032 was evaluated at a switching and control frequency of 20 kHz with a line-to-neutral grid voltage of 230 V at the point of common coupling (PCC). To assess control robustness, a dynamic test scenario was configured in which the DC link voltage is programmed to drop from approximately 700 V to 600 V, right after the application of a step-input of 30A in the d-axis current reference. This DC link voltage drop, therefore, coincides with a step change in the d-axis output current reference (<math xmlns=\"http:\/\/www.w3.org\/1998\/Math\/MathML\"><semantics><mrow><msubsup><mi>I<\/mi><mrow><mi>g<\/mi><mo separator=\"true\">,<\/mo><mi>d<\/mi><\/mrow><mo>\u2217<\/mo><\/msubsup><\/mrow><\/semantics><\/math>\u200b) from 0 A to 30 A at t =5ms.<\/p>\n\n\n\n<p>As illustrated in the results shown in Fig. 8, the FNN-based controller demonstrates a tracking performance similar to a PI controller. 
Despite the varying DC link voltage, a dynamic condition that was notably excluded from the training dataset, the FNN response (blue trace) closely tracks the reference and exhibits a transient response that nearly replicates the conventional PI-based vector current control scheme (dashed orange trace). This confirms that the FNN has successfully generalized its learning to respond to unseen operating conditions. Furthermore, the implementation proves to be computationally efficient; the execution of the FNN algorithm on the <a href=\"https:\/\/imperix.com\/products\/control\/inverter-control-board\/\">B-Board PRO<\/a> embedded platform of <a href=\"https:\/\/imperix.com\/products\/power\/programmable-inverter\/\">TPI8032<\/a> consumes between 35% and 45% of the available cycle time at 20 kHz, ensuring the computational load remains well within the CPU&#8217;s capacity limits.<\/p>\n\n\n<style>.kb-image43088_25c02f-13.kb-image-is-ratio-size, .kb-image43088_25c02f-13 .kb-image-is-ratio-size{max-width:696px;width:100%;}.wp-block-kadence-column > .kt-inside-inner-col > .kb-image43088_25c02f-13.kb-image-is-ratio-size, .wp-block-kadence-column > .kt-inside-inner-col > .kb-image43088_25c02f-13 .kb-image-is-ratio-size{align-self:unset;}.kb-image43088_25c02f-13 figure{max-width:696px;}.kb-image43088_25c02f-13 .image-is-svg, .kb-image43088_25c02f-13 .image-is-svg img{width:100%;}.kb-image43088_25c02f-13 .kb-image-has-overlay:after{opacity:0.3;}<\/style>\n<div class=\"wp-block-kadence-image kb-image43088_25c02f-13\"><figure class=\"aligncenter size-full\"><img loading=\"lazy\" decoding=\"async\" width=\"780\" height=\"600\" src=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/ANN_vs_PI_results-1.png\" alt=\"\" class=\"kb-img wp-image-42928\" srcset=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/ANN_vs_PI_results-1.png 780w, https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/ANN_vs_PI_results-1-300x231.png 300w, 
https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/ANN_vs_PI_results-1-768x591.png 768w\" sizes=\"auto, (max-width: 780px) 100vw, 780px\" \/><figcaption>Fig. 8: Controller performance comparison between the PI current controller and the ANN-based current controller<\/figcaption><\/figure><\/div>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_6e56fb-3d, .wp-block-kadence-advancedheading.kt-adv-heading43088_6e56fb-3d[data-kb-block=\"kb-adv-heading43088_6e56fb-3d\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_6e56fb-3d mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_6e56fb-3d[data-kb-block=\"kb-adv-heading43088_6e56fb-3d\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_6e56fb-3d img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_6e56fb-3d[data-kb-block=\"kb-adv-heading43088_6e56fb-3d\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h2 class=\"kt-adv-heading43088_6e56fb-3d wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_6e56fb-3d\"><span class=\"ez-toc-section\" id=\"Conclusion\"><\/span>Conclusion<span class=\"ez-toc-section-end\"><\/span><\/h2>\n\n\n\n<p>The example implementation demonstrates that a shallow ANN can effectively emulate the mapping of a discrete PI current controller in a grid-following converter while maintaining the deterministic, low-latency execution required for a real-time embedded target. However, the success of the approach is strictly dependent on the quality and breadth of the training dataset. 
While the ANN-based controller showed an ability to generalize to conditions like varying DC-link voltages not explicitly included in training, insufficient data coverage or limited training time remains a primary factor that can lead to subpar performance during unexpected transients. Rigorous cross-validation and the implementation of auxiliary safety mechanisms are essential to mitigate the risk of instability.&nbsp;Contrary to the assumption that ANN requires heavy computing power, this implementation proves that shallow ANNs can be effective and sufficient for power electronics control applications.<\/p>\n\n\n\n<h2 class=\"wp-block-heading\"><span class=\"ez-toc-section\" id=\"References\"><\/span>References<span class=\"ez-toc-section-end\"><\/span><\/h2>\n\n\n\n<p>[1] M. R. G. Meireles, P. E. M. Almeida and M. G. Simoes, &#8220;A comprehensive review for industrial applicability of artificial neural networks,&#8221; in&nbsp;<em>IEEE Transactions on Industrial Electronics<\/em>, vol. 50, no. 3, pp. 585-601, June 2003.<\/p>\n\n\n\n<p>[2] S. Zhao, F. Blaabjerg and H. Wang, &#8220;An Overview of Artificial Intelligence Applications for Power Electronics,&#8221; in&nbsp;<em>IEEE Transactions on Power Electronics<\/em>, April 2021.<\/p>\n\n\n\n<p>[3] B. K. Bose, &#8220;Neural Network Applications in Power Electronics and Motor Drives\u2014An Introduction and Perspective,&#8221; in&nbsp;<em>IEEE Transactions on Industrial Electronics<\/em>, Feb. 2007.<\/p>\n\n\n\n<p>[4] S. Brunton, and J. 
Nathan Kutz,&nbsp;&#8220;<em>Data-driven science and engineering: Machine learning, dynamical systems, and control<\/em>&#8220;, Cambridge University Press, 2022.<\/p>\n\n\n<style>.wp-block-kadence-advancedheading.kt-adv-heading43088_ff26e5-78, .wp-block-kadence-advancedheading.kt-adv-heading43088_ff26e5-78[data-kb-block=\"kb-adv-heading43088_ff26e5-78\"]{font-style:normal;}.wp-block-kadence-advancedheading.kt-adv-heading43088_ff26e5-78 mark.kt-highlight, .wp-block-kadence-advancedheading.kt-adv-heading43088_ff26e5-78[data-kb-block=\"kb-adv-heading43088_ff26e5-78\"] mark.kt-highlight{font-style:normal;color:#f76a0c;-webkit-box-decoration-break:clone;box-decoration-break:clone;padding-top:0px;padding-right:0px;padding-bottom:0px;padding-left:0px;}.wp-block-kadence-advancedheading.kt-adv-heading43088_ff26e5-78 img.kb-inline-image, .wp-block-kadence-advancedheading.kt-adv-heading43088_ff26e5-78[data-kb-block=\"kb-adv-heading43088_ff26e5-78\"] img.kb-inline-image{width:150px;vertical-align:baseline;}<\/style>\n<h2 class=\"kt-adv-heading43088_ff26e5-78 wp-block-kadence-advancedheading\" data-kb-block=\"kb-adv-heading43088_ff26e5-78\"><span class=\"ez-toc-section\" id=\"To-go-further-from-here%E2%80%A6\"><\/span>To go further from here&#8230;<span class=\"ez-toc-section-end\"><\/span><\/h2>\n\n\n\n<p>More information on the control of TPI8032 as a grid-forming inverter can be found in <a href=\"https:\/\/imperix.com\/doc\/implementation\/grid-forming-inverter?currentThread=solid-state-transformer-sst-for-mc-lv-smart-grid\">TN167<\/a>. 
The following are some of the other notes that can be useful:<\/p>\n\n\n\n<p><a href=\"https:\/\/imperix.com\/doc\/help\/tpi-quick-start-guide\">PN190: Getting started with the TPI8032<\/a><br><a href=\"https:\/\/imperix.com\/doc\/implementation\/active-front-end?currentThread=active-front-end-afe\">TN166: Active front end (AFE)<\/a><\/p>\n","protected":false},"excerpt":{"rendered":"<p>Conventional model-based control in power electronics relies heavily on deriving precise mathematical models of the physical system. In contrast, data-driven control shifts this paradigm by&#8230;<\/p>\n","protected":false},"author":29,"featured_media":43226,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"_acf_changed":false,"_kad_post_transparent":"","_kad_post_title":"","_kad_post_layout":"","_kad_post_sidebar_id":"","_kad_post_content_style":"","_kad_post_vertical_padding":"","_kad_post_feature":"","_kad_post_feature_position":"","_kad_post_header":false,"_kad_post_footer":false,"_kad_post_classname":"","footnotes":""},"categories":[4],"tags":[],"software-environments":[103],"provided-results":[108],"related-products":[50,110],"guidedreadings":[],"tutorials":[],"user-manuals":[],"coauthors":[153],"class_list":["post-43088","post","type-post","status-publish","format-standard","has-post-thumbnail","hentry","category-implementation","software-environments-matlab","provided-results-experimental","related-products-acg-sdk","related-products-tpi"],"acf":[],"yoast_head":"<!-- This site is optimized with the Yoast SEO plugin v27.3 - https:\/\/yoast.com\/product\/yoast-seo-wordpress\/ -->\n<title>ANN-based control of a three-phase inverter - imperix<\/title>\n<meta name=\"description\" content=\"This technical note implements an ANN-based control for a grid-connected inverter. 
The traditional PI current regulator is replaced with a feedforward neural network (FNN)\" \/>\n<meta name=\"robots\" content=\"index, follow, max-snippet:-1, max-image-preview:large, max-video-preview:-1\" \/>\n<link rel=\"canonical\" href=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\" \/>\n<meta property=\"og:locale\" content=\"en_US\" \/>\n<meta property=\"og:type\" content=\"article\" \/>\n<meta property=\"og:title\" content=\"ANN-based control of a three-phase inverter - imperix\" \/>\n<meta property=\"og:description\" content=\"This technical note implements an ANN-based control for a grid-connected inverter. The traditional PI current regulator is replaced with a feedforward neural network (FNN)\" \/>\n<meta property=\"og:url\" content=\"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter\" \/>\n<meta property=\"og:site_name\" content=\"imperix\" \/>\n<meta property=\"article:published_time\" content=\"2026-02-25T10:42:54+00:00\" \/>\n<meta property=\"article:modified_time\" content=\"2026-02-25T10:42:55+00:00\" \/>\n<meta property=\"og:image\" content=\"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/feature_image-4.png\" \/>\n\t<meta property=\"og:image:width\" content=\"500\" \/>\n\t<meta property=\"og:image:height\" content=\"334\" \/>\n\t<meta property=\"og:image:type\" content=\"image\/png\" \/>\n<meta name=\"author\" content=\"Adeel Jamal\" \/>\n<meta name=\"twitter:card\" content=\"summary_large_image\" \/>\n<meta name=\"twitter:label1\" content=\"Written by\" \/>\n\t<meta name=\"twitter:data1\" content=\"Adeel Jamal\" \/>\n\t<meta name=\"twitter:label2\" content=\"Est. 
reading time\" \/>\n\t<meta name=\"twitter:data2\" content=\"17 minutes\" \/>\n<script type=\"application\/ld+json\" class=\"yoast-schema-graph\">{\"@context\":\"https:\\\/\\\/schema.org\",\"@graph\":[{\"@type\":\"Article\",\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/implementation\\\/ann-based-control-of-a-three-phase-inverter#article\",\"isPartOf\":{\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/implementation\\\/ann-based-control-of-a-three-phase-inverter\"},\"author\":{\"name\":\"Adeel Jamal\",\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/#\\\/schema\\\/person\\\/dadae0452988b0ab55c2f714a93d24b9\"},\"headline\":\"ANN-based control of a three-phase inverter\",\"datePublished\":\"2026-02-25T10:42:54+00:00\",\"dateModified\":\"2026-02-25T10:42:55+00:00\",\"mainEntityOfPage\":{\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/implementation\\\/ann-based-control-of-a-three-phase-inverter\"},\"wordCount\":3429,\"commentCount\":0,\"publisher\":{\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/#organization\"},\"image\":{\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/implementation\\\/ann-based-control-of-a-three-phase-inverter#primaryimage\"},\"thumbnailUrl\":\"https:\\\/\\\/imperix.com\\\/doc\\\/wp-content\\\/uploads\\\/2026\\\/02\\\/feature_image-4.png\",\"articleSection\":[\"Technical notes\"],\"inLanguage\":\"en-US\",\"potentialAction\":[{\"@type\":\"CommentAction\",\"name\":\"Comment\",\"target\":[\"https:\\\/\\\/imperix.com\\\/doc\\\/implementation\\\/ann-based-control-of-a-three-phase-inverter#respond\"]}]},{\"@type\":\"WebPage\",\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/implementation\\\/ann-based-control-of-a-three-phase-inverter\",\"url\":\"https:\\\/\\\/imperix.com\\\/doc\\\/implementation\\\/ann-based-control-of-a-three-phase-inverter\",\"name\":\"ANN-based control of a three-phase inverter - 
imperix\",\"isPartOf\":{\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/#website\"},\"primaryImageOfPage\":{\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/implementation\\\/ann-based-control-of-a-three-phase-inverter#primaryimage\"},\"image\":{\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/implementation\\\/ann-based-control-of-a-three-phase-inverter#primaryimage\"},\"thumbnailUrl\":\"https:\\\/\\\/imperix.com\\\/doc\\\/wp-content\\\/uploads\\\/2026\\\/02\\\/feature_image-4.png\",\"datePublished\":\"2026-02-25T10:42:54+00:00\",\"dateModified\":\"2026-02-25T10:42:55+00:00\",\"description\":\"This technical note implements an ANN-based control for a grid-connected inverter. The traditional PI current regulator is replaced with a feedforward neural network (FNN)\",\"breadcrumb\":{\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/implementation\\\/ann-based-control-of-a-three-phase-inverter#breadcrumb\"},\"inLanguage\":\"en-US\",\"potentialAction\":[{\"@type\":\"ReadAction\",\"target\":[\"https:\\\/\\\/imperix.com\\\/doc\\\/implementation\\\/ann-based-control-of-a-three-phase-inverter\"]}]},{\"@type\":\"ImageObject\",\"inLanguage\":\"en-US\",\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/implementation\\\/ann-based-control-of-a-three-phase-inverter#primaryimage\",\"url\":\"https:\\\/\\\/imperix.com\\\/doc\\\/wp-content\\\/uploads\\\/2026\\\/02\\\/feature_image-4.png\",\"contentUrl\":\"https:\\\/\\\/imperix.com\\\/doc\\\/wp-content\\\/uploads\\\/2026\\\/02\\\/feature_image-4.png\",\"width\":500,\"height\":334},{\"@type\":\"BreadcrumbList\",\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/implementation\\\/ann-based-control-of-a-three-phase-inverter#breadcrumb\",\"itemListElement\":[{\"@type\":\"ListItem\",\"position\":1,\"name\":\"Knowledge base\",\"item\":\"https:\\\/\\\/imperix.com\\\/doc\\\/\"},{\"@type\":\"ListItem\",\"position\":2,\"name\":\"Technical 
notes\",\"item\":\"https:\\\/\\\/imperix.com\\\/doc\\\/category\\\/implementation\"},{\"@type\":\"ListItem\",\"position\":3,\"name\":\"ANN-based control of a three-phase inverter\"}]},{\"@type\":\"WebSite\",\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/#website\",\"url\":\"https:\\\/\\\/imperix.com\\\/doc\\\/\",\"name\":\"imperix\",\"description\":\"power electronics\",\"publisher\":{\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/#organization\"},\"potentialAction\":[{\"@type\":\"SearchAction\",\"target\":{\"@type\":\"EntryPoint\",\"urlTemplate\":\"https:\\\/\\\/imperix.com\\\/doc\\\/?s={search_term_string}\"},\"query-input\":{\"@type\":\"PropertyValueSpecification\",\"valueRequired\":true,\"valueName\":\"search_term_string\"}}],\"inLanguage\":\"en-US\"},{\"@type\":\"Organization\",\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/#organization\",\"name\":\"imperix\",\"url\":\"https:\\\/\\\/imperix.com\\\/doc\\\/\",\"logo\":{\"@type\":\"ImageObject\",\"inLanguage\":\"en-US\",\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/#\\\/schema\\\/logo\\\/image\\\/\",\"url\":\"https:\\\/\\\/imperix.com\\\/doc\\\/wp-content\\\/uploads\\\/2021\\\/03\\\/imperix_logo.png\",\"contentUrl\":\"https:\\\/\\\/imperix.com\\\/doc\\\/wp-content\\\/uploads\\\/2021\\\/03\\\/imperix_logo.png\",\"width\":350,\"height\":120,\"caption\":\"imperix\"},\"image\":{\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/#\\\/schema\\\/logo\\\/image\\\/\"}},{\"@type\":\"Person\",\"@id\":\"https:\\\/\\\/imperix.com\\\/doc\\\/#\\\/schema\\\/person\\\/dadae0452988b0ab55c2f714a93d24b9\",\"name\":\"Adeel 
Jamal\",\"image\":{\"@type\":\"ImageObject\",\"inLanguage\":\"en-US\",\"@id\":\"https:\\\/\\\/secure.gravatar.com\\\/avatar\\\/b2123faeff454e8f7d9966142bddb5dc3a29288bfb3571afd8ecf52be937c100?s=96&d=mm&r=ge2117152862b116d8f386a69dbc1b0ff\",\"url\":\"https:\\\/\\\/secure.gravatar.com\\\/avatar\\\/b2123faeff454e8f7d9966142bddb5dc3a29288bfb3571afd8ecf52be937c100?s=96&d=mm&r=g\",\"contentUrl\":\"https:\\\/\\\/secure.gravatar.com\\\/avatar\\\/b2123faeff454e8f7d9966142bddb5dc3a29288bfb3571afd8ecf52be937c100?s=96&d=mm&r=g\",\"caption\":\"Adeel Jamal\"},\"description\":\"Adeel Jamal is a consultant in power electronics and rapid control prototyping. He worked as a Research Associate in the Institute of Power Electronics and Control of Drives at Technical University of Darmstadt until the end of 2024. He received his B.E. in Electrical Engineering from the National University of Sciences and Technology, Islamabad, in 2014, and his M.Sc. in Power Engineering from the Technical University of Munich in 2018. He completed his Ph.D. research work in 2024. His research focuses on advanced control and modulation techniques for grid-tied and multi-level converters, with an emphasis on minimizing total harmonic distortion.\",\"sameAs\":[\"https:\\\/\\\/www.linkedin.com\\\/in\\\/adeel-j-siddiqui\\\/\"],\"url\":\"https:\\\/\\\/imperix.com\\\/doc\\\/author\\\/jamal\"}]}<\/script>\n<!-- \/ Yoast SEO plugin. -->","yoast_head_json":{"title":"ANN-based control of a three-phase inverter - imperix","description":"This technical note implements an ANN-based control for a grid-connected inverter. 
The traditional PI current regulator is replaced with a feedforward neural network (FNN)","robots":{"index":"index","follow":"follow","max-snippet":"max-snippet:-1","max-image-preview":"max-image-preview:large","max-video-preview":"max-video-preview:-1"},"canonical":"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter","og_locale":"en_US","og_type":"article","og_title":"ANN-based control of a three-phase inverter - imperix","og_description":"This technical note implements an ANN-based control for a grid-connected inverter. The traditional PI current regulator is replaced with a feedforward neural network (FNN)","og_url":"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter","og_site_name":"imperix","article_published_time":"2026-02-25T10:42:54+00:00","article_modified_time":"2026-02-25T10:42:55+00:00","og_image":[{"width":500,"height":334,"url":"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/feature_image-4.png","type":"image\/png"}],"author":"Adeel Jamal","twitter_card":"summary_large_image","twitter_misc":{"Written by":"Adeel Jamal","Est. 
reading time":"17 minutes"},"schema":{"@context":"https:\/\/schema.org","@graph":[{"@type":"Article","@id":"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter#article","isPartOf":{"@id":"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter"},"author":{"name":"Adeel Jamal","@id":"https:\/\/imperix.com\/doc\/#\/schema\/person\/dadae0452988b0ab55c2f714a93d24b9"},"headline":"ANN-based control of a three-phase inverter","datePublished":"2026-02-25T10:42:54+00:00","dateModified":"2026-02-25T10:42:55+00:00","mainEntityOfPage":{"@id":"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter"},"wordCount":3429,"commentCount":0,"publisher":{"@id":"https:\/\/imperix.com\/doc\/#organization"},"image":{"@id":"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter#primaryimage"},"thumbnailUrl":"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/feature_image-4.png","articleSection":["Technical notes"],"inLanguage":"en-US","potentialAction":[{"@type":"CommentAction","name":"Comment","target":["https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter#respond"]}]},{"@type":"WebPage","@id":"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter","url":"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter","name":"ANN-based control of a three-phase inverter - 
imperix","isPartOf":{"@id":"https:\/\/imperix.com\/doc\/#website"},"primaryImageOfPage":{"@id":"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter#primaryimage"},"image":{"@id":"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter#primaryimage"},"thumbnailUrl":"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/feature_image-4.png","datePublished":"2026-02-25T10:42:54+00:00","dateModified":"2026-02-25T10:42:55+00:00","description":"This technical note implements an ANN-based control for a grid-connected inverter. The traditional PI current regulator is replaced with a feedforward neural network (FNN)","breadcrumb":{"@id":"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter#breadcrumb"},"inLanguage":"en-US","potentialAction":[{"@type":"ReadAction","target":["https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter"]}]},{"@type":"ImageObject","inLanguage":"en-US","@id":"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter#primaryimage","url":"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/feature_image-4.png","contentUrl":"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2026\/02\/feature_image-4.png","width":500,"height":334},{"@type":"BreadcrumbList","@id":"https:\/\/imperix.com\/doc\/implementation\/ann-based-control-of-a-three-phase-inverter#breadcrumb","itemListElement":[{"@type":"ListItem","position":1,"name":"Knowledge base","item":"https:\/\/imperix.com\/doc\/"},{"@type":"ListItem","position":2,"name":"Technical notes","item":"https:\/\/imperix.com\/doc\/category\/implementation"},{"@type":"ListItem","position":3,"name":"ANN-based control of a three-phase inverter"}]},{"@type":"WebSite","@id":"https:\/\/imperix.com\/doc\/#website","url":"https:\/\/imperix.com\/doc\/","name":"imperix","description":"power 
electronics","publisher":{"@id":"https:\/\/imperix.com\/doc\/#organization"},"potentialAction":[{"@type":"SearchAction","target":{"@type":"EntryPoint","urlTemplate":"https:\/\/imperix.com\/doc\/?s={search_term_string}"},"query-input":{"@type":"PropertyValueSpecification","valueRequired":true,"valueName":"search_term_string"}}],"inLanguage":"en-US"},{"@type":"Organization","@id":"https:\/\/imperix.com\/doc\/#organization","name":"imperix","url":"https:\/\/imperix.com\/doc\/","logo":{"@type":"ImageObject","inLanguage":"en-US","@id":"https:\/\/imperix.com\/doc\/#\/schema\/logo\/image\/","url":"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2021\/03\/imperix_logo.png","contentUrl":"https:\/\/imperix.com\/doc\/wp-content\/uploads\/2021\/03\/imperix_logo.png","width":350,"height":120,"caption":"imperix"},"image":{"@id":"https:\/\/imperix.com\/doc\/#\/schema\/logo\/image\/"}},{"@type":"Person","@id":"https:\/\/imperix.com\/doc\/#\/schema\/person\/dadae0452988b0ab55c2f714a93d24b9","name":"Adeel Jamal","image":{"@type":"ImageObject","inLanguage":"en-US","@id":"https:\/\/secure.gravatar.com\/avatar\/b2123faeff454e8f7d9966142bddb5dc3a29288bfb3571afd8ecf52be937c100?s=96&d=mm&r=ge2117152862b116d8f386a69dbc1b0ff","url":"https:\/\/secure.gravatar.com\/avatar\/b2123faeff454e8f7d9966142bddb5dc3a29288bfb3571afd8ecf52be937c100?s=96&d=mm&r=g","contentUrl":"https:\/\/secure.gravatar.com\/avatar\/b2123faeff454e8f7d9966142bddb5dc3a29288bfb3571afd8ecf52be937c100?s=96&d=mm&r=g","caption":"Adeel Jamal"},"description":"Adeel Jamal is a consultant in power electronics and rapid control prototyping. He worked as a Research Associate in the Institute of Power Electronics and Control of Drives at Technical University of Darmstadt until the end of 2024. He received his B.E. in Electrical Engineering from the National University of Sciences and Technology, Islamabad, in 2014, and his M.Sc. in Power Engineering from the Technical University of Munich in 2018. He completed his Ph.D. 
research work in 2024. His research focuses on advanced control and modulation techniques for grid-tied and multi-level converters, with an emphasis on minimizing total harmonic distortion.","sameAs":["https:\/\/www.linkedin.com\/in\/adeel-j-siddiqui\/"],"url":"https:\/\/imperix.com\/doc\/author\/jamal"}]}},"_links":{"self":[{"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/posts\/43088","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/users\/29"}],"replies":[{"embeddable":true,"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/comments?post=43088"}],"version-history":[{"count":31,"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/posts\/43088\/revisions"}],"predecessor-version":[{"id":43246,"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/posts\/43088\/revisions\/43246"}],"wp:featuredmedia":[{"embeddable":true,"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/media\/43226"}],"wp:attachment":[{"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/media?parent=43088"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/categories?post=43088"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/tags?post=43088"},{"taxonomy":"software-environments","embeddable":true,"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/software-environments?post=43088"},{"taxonomy":"provided-results","embeddable":true,"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/provided-results?post=43088"},{"taxonomy":"related-products","embeddable":true,"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/related-products?post=43088"},{"taxonomy":"guidedreadings","embeddable":true,"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/guidedreadings?post=43088"},{"taxo
nomy":"tutorials","embeddable":true,"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/tutorials?post=43088"},{"taxonomy":"user-manuals","embeddable":true,"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/user-manuals?post=43088"},{"taxonomy":"author","embeddable":true,"href":"https:\/\/imperix.com\/doc\/wp-json\/wp\/v2\/coauthors?post=43088"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}