@@ -1627,3 +1627,152 @@ func (Wood) Minima() []Minimum {
},
}
}
+
+ // ConcaveRight implements a univariate function that is concave to the right
+ // of the minimizer, which is located at x=sqrt(2).
+ //
+ // References:
+ //  More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease.
+ //  ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.1)
+ type ConcaveRight struct{}
+
+ func (ConcaveRight) Func(x []float64) float64 {
+ 	if len(x) != 1 {
+ 		panic("dimension of the problem must be 1")
+ 	}
+ 	return -x[0] / (x[0]*x[0] + 2)
+ }
+
+ func (ConcaveRight) Grad(grad, x []float64) {
+ 	if len(x) != 1 {
+ 		panic("dimension of the problem must be 1")
+ 	}
+ 	if len(x) != len(grad) {
+ 		panic("incorrect size of the gradient")
+ 	}
+ 	xSqr := x[0] * x[0]
+ 	grad[0] = (xSqr - 2) / (xSqr + 2) / (xSqr + 2)
+ }
+
+ // ConcaveLeft implements a univariate function that is concave to the left of
+ // the minimizer, which is located at x=399/250=1.596.
+ //
+ // References:
+ //  More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease.
+ //  ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.2)
+ type ConcaveLeft struct{}
+
+ func (ConcaveLeft) Func(x []float64) float64 {
+ 	if len(x) != 1 {
+ 		panic("dimension of the problem must be 1")
+ 	}
+ 	return math.Pow(x[0]+0.004, 4) * (x[0] - 1.996)
+ }
+
+ func (ConcaveLeft) Grad(grad, x []float64) {
+ 	if len(x) != 1 {
+ 		panic("dimension of the problem must be 1")
+ 	}
+ 	if len(x) != len(grad) {
+ 		panic("incorrect size of the gradient")
+ 	}
+ 	grad[0] = math.Pow(x[0]+0.004, 3) * (5*x[0] - 7.98)
+ }
+
+ // Plassmann implements a univariate oscillatory function where the value of L
+ // controls the number of oscillations. The value of Beta controls the size of
+ // the derivative at zero and the size of the interval where the strong Wolfe
+ // conditions can hold. For small values of Beta this function is a difficult
+ // test problem for line searchers, also because the derivative information is
+ // made unreliable by the oscillations.
+ //
+ // References:
+ //  More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease.
+ //  ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.3)
+ type Plassmann struct {
+ 	L    float64 // Number of oscillations for |x-1| ≥ Beta.
+ 	Beta float64 // Size of the derivative at zero, f'(0) = -Beta.
+ }
+
+ func (f Plassmann) Func(x []float64) float64 {
+ 	if len(x) != 1 {
+ 		panic("dimension of the problem must be 1")
+ 	}
+ 	a := x[0]
+ 	b := f.Beta
+ 	l := f.L
+ 	r := 2 * (1 - b) / l / math.Pi * math.Sin(l*math.Pi/2*a)
+ 	switch {
+ 	case a <= 1-b:
+ 		r += 1 - a
+ 	case 1-b < a && a <= 1+b:
+ 		r += 0.5 * ((a-1)*(a-1)/b + b)
+ 	default: // a > 1+b
+ 		r += a - 1
+ 	}
+ 	return r
+ }
+
+ func (f Plassmann) Grad(grad, x []float64) {
+ 	if len(x) != 1 {
+ 		panic("dimension of the problem must be 1")
+ 	}
+ 	if len(x) != len(grad) {
+ 		panic("incorrect size of the gradient")
+ 	}
+ 	a := x[0]
+ 	b := f.Beta
+ 	l := f.L
+ 	grad[0] = (1 - b) * math.Cos(l*math.Pi/2*a)
+ 	switch {
+ 	case a <= 1-b:
+ 		grad[0] -= 1
+ 	case 1-b < a && a <= 1+b:
+ 		grad[0] += (a - 1) / b
+ 	default: // a > 1+b
+ 		grad[0] += 1
+ 	}
+ }
+
+ // YanaiOzawaKaneko is a univariate convex function where the values of Beta1
+ // and Beta2 control the curvature around the minimum. Far away from the
+ // minimum the function approximates an absolute value function. Near the
+ // minimum, the function can either be sharply curved or flat, controlled by
+ // the parameter values.
+ //
+ // References:
+ //  - More, J.J., and Thuente, D.J.: Line Search Algorithms with Guaranteed Sufficient Decrease.
+ //    ACM Transactions on Mathematical Software 20(3) (1994), 286–307, eq. (5.4)
+ //  - Yanai, H., Ozawa, M., and Kaneko, S.: Interpolation methods in one dimensional
+ //    optimization. Computing 27 (1981), 155–163
+ type YanaiOzawaKaneko struct {
+ 	Beta1 float64
+ 	Beta2 float64
+ }
+
+ func (f YanaiOzawaKaneko) Func(x []float64) float64 {
+ 	if len(x) != 1 {
+ 		panic("dimension of the problem must be 1")
+ 	}
+ 	a := x[0]
+ 	b1 := f.Beta1
+ 	b2 := f.Beta2
+ 	g1 := math.Sqrt(1+b1*b1) - b1
+ 	g2 := math.Sqrt(1+b2*b2) - b2
+ 	return g1*math.Sqrt((a-1)*(a-1)+b2*b2) + g2*math.Sqrt(a*a+b1*b1)
+ }
+
+ func (f YanaiOzawaKaneko) Grad(grad, x []float64) {
+ 	if len(x) != 1 {
+ 		panic("dimension of the problem must be 1")
+ 	}
+ 	if len(x) != len(grad) {
+ 		panic("incorrect size of the gradient")
+ 	}
+ 	a := x[0]
+ 	b1 := f.Beta1
+ 	b2 := f.Beta2
+ 	g1 := math.Sqrt(1+b1*b1) - b1
+ 	g2 := math.Sqrt(1+b2*b2) - b2
+ 	grad[0] = g1*(a-1)/math.Sqrt(b2*b2+(a-1)*(a-1)) + g2*a/math.Sqrt(b1*b1+a*a)
+ }
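
As a quick, informal check of the properties documented above (ConcaveRight's minimizer at x=sqrt(2), ConcaveLeft's at x=399/250=1.596, and Plassmann's derivative f'(0) = -Beta), the sketch below evaluates the new Grad methods at those points. It is not part of the patch: the import path and the main wrapper are hypothetical, and the Plassmann parameters L=100, Beta=0.01 are arbitrary illustrative choices.

package main

import (
	"fmt"
	"math"

	"example.com/optimize/functions" // hypothetical import path for the package modified in this diff
)

func main() {
	grad := make([]float64, 1)

	// ConcaveRight: the gradient should vanish at the documented minimizer x = sqrt(2).
	functions.ConcaveRight{}.Grad(grad, []float64{math.Sqrt2})
	fmt.Println("ConcaveRight grad at sqrt(2):", grad[0]) // approximately 0

	// ConcaveLeft: the gradient should vanish at the documented minimizer x = 399/250 = 1.596.
	functions.ConcaveLeft{}.Grad(grad, []float64{1.596})
	fmt.Println("ConcaveLeft grad at 1.596:", grad[0]) // approximately 0

	// Plassmann: the derivative at zero should equal -Beta regardless of L,
	// since for a = 0 the gradient is (1-Beta)*cos(0) - 1 = -Beta.
	functions.Plassmann{L: 100, Beta: 0.01}.Grad(grad, []float64{0})
	fmt.Println("Plassmann grad at 0:", grad[0]) // -0.01
}

Running a check like this by hand is a cheap way to confirm that the gradients agree with the documented special points before wiring these functions into line-search tests.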