related work: continuation of equation learning section
@@ -219,13 +219,6 @@
  file = {PDF:C\:\\Users\\danwi\\Zotero\\storage\\GKAYMMNN\\Memarzia und Khunjush - 2015 - An In-depth Study on the Performance Impact of CUDA, OpenCL, and PTX Code.pdf:application/pdf},
}

@online{noauthor_-depth_nodate,
  title = {An In-depth Study on the Performance Impact of {CUDA}, {OpenCL}, and {PTX} Code},
  url = {https://www.global-sci.org/intro/article_detail.html?journal=undefined&article_id=22555},
  urldate = {2024-12-01},
  file = {An In-depth Study on the Performance Impact of CUDA, OpenCL, and PTX Code:C\:\\Users\\danwi\\Zotero\\storage\\7CPIZPCF\\article_detail.html:text/html},
}

@article{bastidas_fuertes_transpiler-based_2023,
  title = {Transpiler-Based Architecture Design Model for Back-End Layers in Software Development},
  volume = {13},
@@ -331,3 +324,71 @@ Publisher: Multidisciplinary Digital Publishing Institute},
  note = {Publisher: Proceedings of the National Academy of Sciences},
  file = {Full Text PDF:C\:\\Users\\danwi\\Zotero\\storage\\6R643NFZ\\Brunton et al. - 2016 - Discovering governing equations from data by sparse identification of nonlinear dynamical systems.pdf:application/pdf},
}

@article{dong_evolving_2024,
  title = {Evolving Equation Learner For Symbolic Regression},
  issn = {1941-0026},
  url = {https://ieeexplore.ieee.org/abstract/document/10538006/metrics#metrics},
  doi = {10.1109/TEVC.2024.3404650},
  abstract = {Symbolic regression, a multifaceted optimization challenge involving the refinement of both structural components and coefficients, has gained significant research interest in recent years. The Equation Learner ({EQL}), a neural network designed to optimize both equation structure and coefficients through gradient-based optimization algorithms, has emerged as an important topic of concern within this field. Thus far, several variations of {EQL} have been introduced. Nevertheless, these existing {EQL} methodologies suffer from a fundamental constraint that they necessitate a predefined network structure. This limitation imposes constraints on the complexity of equations and makes them ill-suited for high-dimensional or high-order problem domains. To tackle the aforementioned shortcomings, we present a novel approach known as the evolving Equation Learner ({eEQL}). {eEQL} introduces a unique network structure characterized by automatically defined functions ({ADFs}). This new architectural design allows for dynamic adaptations of the network structure. Moreover, by engaging in self-learning and self-evolution during the search process, {eEQL} facilitates the generation of intricate, high-order, and constructive sub-functions. This enhancement can improve the accuracy and efficiency of the algorithm. To evaluate its performance, the proposed {eEQL} method has been tested across various datasets, including benchmark datasets, physics datasets, and real-world datasets. The results have demonstrated that our approach outperforms several well-known methods.},
  pages = {1--1},
  journaltitle = {{IEEE} Transactions on Evolutionary Computation},
  author = {Dong, Junlan and Zhong, Jinghui and Liu, Wei-Li and Zhang, Jun},
  urldate = {2025-02-26},
  date = {2024},
  note = {Conference Name: {IEEE} Transactions on Evolutionary Computation},
  keywords = {Optimization, Adaptation models, Complexity theory, Equation Learner, Evolutionary computation, Evolving equation learner, Mathematical models, Neural networks, Progressive Evolutionary Structure Search, Training},
  file = {IEEE Xplore Abstract Record:C\:\\Users\\danwi\\Zotero\\storage\\8PQADTZP\\metrics.html:text/html},
}
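For context on the {EQL} family cited above: an Equation Learner is a feed-forward network whose units are algebraic primitives (identity, sine, cosine, multiplication) rather than generic activations, so a trained, sparsified network reads off directly as a closed-form expression. Below is a minimal sketch of one such layer with a small fixed basis; names and the basis are illustrative, and this does not implement Dong et al.'s structure-evolving {eEQL}.

```python
import numpy as np

rng = np.random.default_rng(0)

def eql_layer(x, W, b):
    """Linear map followed by a fixed bank of algebraic units.

    x : (n_samples, n_in)  input features
    W : (n_in, 5)          weights feeding five pre-activation slots
    b : (5,)               biases
    """
    z = x @ W + b                 # pre-activations z_1..z_5
    return np.column_stack([
        z[:, 0],                  # identity unit
        np.sin(z[:, 1]),          # sine unit
        np.cos(z[:, 2]),          # cosine unit
        z[:, 3] * z[:, 4],        # binary product unit
    ])

# One hidden layer plus a linear readout yields a closed-form expression
# in the inputs once the (sparse) weights are fixed.
x = rng.normal(size=(8, 2))
W, b = rng.normal(size=(2, 5)), np.zeros(5)
readout = rng.normal(size=(4,))
y_hat = eql_layer(x, W, b) @ readout
print(y_hat.shape)  # (8,)
```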
@incollection{korns_accuracy_2011,
  location = {New York, {NY}},
  title = {Accuracy in Symbolic Regression},
  isbn = {978-1-4614-1770-5},
  url = {https://doi.org/10.1007/978-1-4614-1770-5_8},
  abstract = {This chapter asserts that, in current state-of-the-art symbolic regression engines, accuracy is poor. That is to say that state-of-the-art symbolic regression engines return a champion with good fitness; however, obtaining a champion with the correct formula is not forthcoming even in cases of only one basis function with minimally complex grammar depth. Ideally, users expect that for test problems created with no noise, using only functions in the specified grammar, with only one basis function and some minimal grammar depth, that state-of-the-art symbolic regression systems should return the exact formula (or at least an isomorph) used to create the test data. Unfortunately, this expectation cannot currently be achieved using published state-of-the-art symbolic regression techniques. Several classes of test formulas, which prove intractable, are examined and an understanding of why they are intractable is developed. Techniques in Abstract Expression Grammars are employed to render these problems tractable, including manipulation of the epigenome during the evolutionary process, together with breeding of multiple targeted epigenomes in separate population islands. A selected set of currently intractable problems are shown to be solvable, using these techniques, and a proposal is put forward for a discipline-wide program of improving accuracy in state-of-the-art symbolic regression systems.},
  pages = {129--151},
  booktitle = {Genetic Programming Theory and Practice {IX}},
  publisher = {Springer},
  author = {Korns, Michael F.},
  editor = {Riolo, Rick and Vladislavleva, Ekaterina and Moore, Jason H.},
  urldate = {2025-02-27},
  date = {2011},
  langid = {english},
  doi = {10.1007/978-1-4614-1770-5_8},
}
@article{keijzer_scaled_2004,
  title = {Scaled Symbolic Regression},
  volume = {5},
  issn = {1573-7632},
  url = {https://doi.org/10.1023/B:GENP.0000030195.77571.f9},
  doi = {10.1023/B:GENP.0000030195.77571.f9},
  abstract = {Performing a linear regression on the outputs of arbitrary symbolic expressions has empirically been found to provide great benefits. Here some basic theoretical results of linear regression are reviewed on their applicability for use in symbolic regression. It will be proven that the use of a scaled error measure, in which the error is calculated after scaling, is expected to perform better than its unscaled counterpart on all possible symbolic regression problems. As the method (i) does not introduce additional parameters to a symbolic regression run, (ii) is guaranteed to improve results on most symbolic regression problems (and is not worse on any other problem), and (iii) has a well-defined upper bound on the error, scaled squared error is an ideal candidate to become the standard error measure for practical applications of symbolic regression.},
  pages = {259--269},
  number = {3},
  journaltitle = {Genetic Programming and Evolvable Machines},
  shortjournal = {Genet Program Evolvable Mach},
  author = {Keijzer, Maarten},
  urldate = {2025-02-27},
  date = {2004-09-01},
  langid = {english},
  keywords = {Artificial Intelligence, genetic programming, linear regression, symbolic regression},
  file = {Full Text PDF:C\:\\Users\\danwi\\Zotero\\storage\\ZH9LAN74\\Keijzer - 2004 - Scaled Symbolic Regression.pdf:application/pdf},
}
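Keijzer's scaled error measure is cheap to state concretely: before scoring a candidate expression f, fit the affine map a + b·f(x) that minimizes squared error against the targets (the closed form is b = cov(y, f)/var(f) and a = mean(y) − b·mean(f)), then report the error of the scaled prediction. A minimal sketch of that scoring rule, with illustrative names:

```python
import numpy as np

def scaled_mse(y, f):
    """MSE of y against the best affine transform a + b*f (Keijzer 2004)."""
    f_var = np.var(f)
    if f_var == 0.0:                 # constant expression: slope is undefined,
        return np.var(y)             # best fit is the intercept a = mean(y)
    b = np.cov(y, f, bias=True)[0, 1] / f_var   # optimal slope
    a = np.mean(y) - b * np.mean(f)             # optimal intercept
    return np.mean((y - (a + b * f)) ** 2)

# A candidate matching the target only up to scale and offset still gets a
# (near-)zero scaled error, while the unscaled error stays large:
x = np.linspace(0.0, 1.0, 50)
y = 3.0 * np.sin(x) + 2.0        # target
f = np.sin(x)                    # candidate expression output
print(scaled_mse(y, f))          # ~0.0
print(np.mean((y - f) ** 2))     # large unscaled error
```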
@misc{jin_bayesian_2020,
  title = {Bayesian Symbolic Regression},
  url = {http://arxiv.org/abs/1910.08892},
  doi = {10.48550/arXiv.1910.08892},
  abstract = {Interpretability is crucial for machine learning in many scenarios such as quantitative finance, banking, healthcare, etc. Symbolic regression ({SR}) is a classic interpretable machine learning method by bridging X and Y using mathematical expressions composed of some basic functions. However, the search space of all possible expressions grows exponentially with the length of the expression, making it infeasible for enumeration. Genetic programming ({GP}) has been traditionally and commonly used in {SR} to search for the optimal solution, but it suffers from several limitations, e.g. the difficulty in incorporating prior knowledge; overly-complicated output expression and reduced interpretability etc. To address these issues, we propose a new method to fit {SR} under a Bayesian framework. Firstly, Bayesian model can naturally incorporate prior knowledge (e.g., preference of basis functions, operators and raw features) to improve the efficiency of fitting {SR}. Secondly, to improve interpretability of expressions in {SR}, we aim to capture concise but informative signals. To this end, we assume the expected signal has an additive structure, i.e., a linear combination of several concise expressions, whose complexity is controlled by a well-designed prior distribution. In our setup, each expression is characterized by a symbolic tree, and the proposed {SR} model could be solved by sampling symbolic trees from the posterior distribution using an efficient Markov chain Monte Carlo ({MCMC}) algorithm. Finally, compared with {GP}, the proposed {BSR} (Bayesian Symbolic Regression) method saves computer memory with no need to keep an updated 'genome pool'. Numerical experiments show that, compared with {GP}, the solutions of {BSR} are closer to the ground truth and the expressions are more concise. Meanwhile we find the solution of {BSR} is robust to hyper-parameter specifications such as the number of trees.},
  number = {{arXiv}:1910.08892},
  publisher = {{arXiv}},
  author = {Jin, Ying and Fu, Weilin and Kang, Jian and Guo, Jiadong and Guo, Jian},
  urldate = {2025-02-27},
  date = {2020-01-16},
  eprinttype = {arxiv},
  eprint = {1910.08892 [stat]},
  keywords = {Statistics - Methodology},
  file = {Preprint PDF:C\:\\Users\\danwi\\Zotero\\storage\\3MP48UI3\\Jin et al. - 2020 - Bayesian Symbolic Regression.pdf:application/pdf;Snapshot:C\:\\Users\\danwi\\Zotero\\storage\\UNNZKPRJ\\1910.html:text/html},
}
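The sampling idea behind {BSR} can be miniaturized: put a prior on expression trees that penalizes size, define a Gaussian likelihood for the residuals, and run Metropolis-Hastings with a proposal that regrows a random subtree. The sketch below uses a toy grammar and, for brevity, omits the proposal-density correction and the linear-combination-of-trees structure of the actual {BSR} model, so it is only an illustration of the idea, not the paper's algorithm.

```python
import random
import numpy as np

OPS = {'+': np.add, '*': np.multiply, 'sin': np.sin}

def random_tree(depth=3):
    """Grow a random tree over the toy grammar {+, *, sin, x}."""
    if depth == 0 or random.random() < 0.3:
        return 'x'
    op = random.choice(list(OPS))
    if op == 'sin':
        return (op, random_tree(depth - 1))
    return (op, random_tree(depth - 1), random_tree(depth - 1))

def evaluate(tree, x):
    if tree == 'x':
        return x
    return OPS[tree[0]](*(evaluate(c, x) for c in tree[1:]))

def size(tree):
    return 1 if tree == 'x' else 1 + sum(size(c) for c in tree[1:])

def log_post(tree, x, y, sigma=0.1, lam=1.0):
    """Unnormalized log posterior: Gaussian fit term minus a size penalty."""
    resid = y - evaluate(tree, x)
    return -0.5 * np.sum(resid ** 2) / sigma ** 2 - lam * size(tree)

def propose(tree):
    """Occasionally regrow the whole tree, otherwise recurse into a child."""
    if tree == 'x' or random.random() < 0.3:
        return random_tree()
    children = list(tree[1:])
    i = random.randrange(len(children))
    children[i] = propose(children[i])
    return (tree[0], *children)

random.seed(0)
x = np.linspace(-2.0, 2.0, 40)
y = np.sin(x) + x                    # hidden ground truth: sin(x) + x
current = random_tree()
cur_lp = log_post(current, x, y)
best_lp, best_tree = cur_lp, current
for _ in range(5000):
    cand = propose(current)
    cand_lp = log_post(cand, x, y)
    if np.log(random.random()) < cand_lp - cur_lp:   # MH accept/reject
        current, cur_lp = cand, cand_lp
        if cur_lp > best_lp:
            best_lp, best_tree = cur_lp, current
print(best_lp, best_tree)
```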