 <meta name="Description" content="scikit-optimize: machine learning in Python">
-<title>skopt.learning.gaussian_process.gpr — scikit-optimize 0.8.0 documentation</title>
+<title>skopt.learning.gaussian_process.gpr — scikit-optimize 0.8.1 documentation</title>
 <link rel="canonical" href="https://scikit-optimize.github.io/_modules/skopt/learning/gaussian_process/gpr.html" />
 </div>
 <div class="alert alert-danger p-1 mb-2" role="alert">
 <p class="text-center mb-0">
-<strong>scikit-optimize 0.8.0</strong> <br />
+<strong>scikit-optimize 0.8.1</strong> <br />
 <a href="https://scikit-optimize.github.io/dev/versions.html">Other versions</a>
 </p>
 </div>
@@ -353,10 +353,15 @@ Source code for skopt.learning.gaussian_process.gpr
         self.K_inv_ = L_inv.dot(L_inv.T)

         # Fix deprecation warning #462
-        if int(sklearn.__version__[2:4]) >= 19:
+        if int(sklearn.__version__[2:4]) >= 23:
+            self.y_train_std_ = self._y_train_std
             self.y_train_mean_ = self._y_train_mean
+        elif int(sklearn.__version__[2:4]) >= 19:
+            self.y_train_mean_ = self._y_train_mean
+            self.y_train_std_ = 1
         else:
             self.y_train_mean_ = self.y_train_mean
+            self.y_train_std_ = 1

         return self
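For reference, the version gate parses the minor release number out of scikit-learn's version string; releases in this range all start with "0.", so the slice [2:4] yields the two minor-version digits. A minimal illustration (the version strings are examples, not taken from the commit):

    # "0.23.2"[2:4] -> "23", "0.19.1"[2:4] -> "19"
    minor = int("0.23.2"[2:4])
    assert minor >= 23        # newer scikit-learn: _y_train_std is available
    minor = int("0.19.1"[2:4])
    assert 19 <= minor < 23   # older: y is not rescaled, so y_train_std_ stays 1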
@@ -438,11 +443,14 @@ Source code for skopt.learning.gaussian_process.gpr
         else:  # Predict based on GP posterior
             K_trans = self.kernel_(X, self.X_train_)
             y_mean = K_trans.dot(self.alpha_)  # Line 4 (y_mean = f_star)
-            y_mean = self.y_train_mean_ + y_mean  # undo normal.
+            # undo normalisation
+            y_mean = self.y_train_std_ * y_mean + self.y_train_mean_

             if return_cov:
                 v = cho_solve((self.L_, True), K_trans.T)  # Line 5
                 y_cov = self.kernel_(X) - K_trans.dot(v)  # Line 6
+                # undo normalisation
+                y_cov = y_cov * self.y_train_std_ ** 2
                 return y_mean, y_cov

             elif return_std:
@@ -459,17 +467,22 @@ Source code for skopt.learning.gaussian_process.gpr
                     warnings.warn("Predicted variances smaller than 0. "
                                   "Setting those variances to 0.")
                     y_var[y_var_negative] = 0.0
+                # undo normalisation
+                y_var = y_var * self.y_train_std_ ** 2
                 y_std = np.sqrt(y_var)

             if return_mean_grad:
                 grad = self.kernel_.gradient_x(X[0], self.X_train_)
                 grad_mean = np.dot(grad.T, self.alpha_)
-
+                # undo normalisation
+                grad_mean = grad_mean * self.y_train_std_
                 if return_std_grad:
                     grad_std = np.zeros(X.shape[1])
                     if not np.allclose(y_std, grad_std):
                         grad_std = -np.dot(K_trans,
                                            np.dot(K_inv, grad))[0] / y_std
+                        # undo normalisation
+                        grad_std = grad_std * self.y_train_std_ ** 2
                     return y_mean, y_std, grad_mean, grad_std

                 if return_std:
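Taken together, the predict-time additions map every quantity from the normalised target space back to the original scale: the mean is scaled and shifted, variances (and hence covariances) are scaled by the squared standard deviation, and gradients of the mean scale linearly. A small self-contained sketch of that arithmetic, using made-up values for the training mean and standard deviation:

    import numpy as np

    y_train_mean_, y_train_std_ = 3.2, 0.8   # hypothetical values learned in fit()

    y_mean_norm = np.array([0.1, -0.5])      # posterior mean in normalised space
    y_var_norm = np.array([0.04, 0.09])      # posterior variance in normalised space

    y_mean = y_train_std_ * y_mean_norm + y_train_mean_  # undo normalisation of the mean
    y_var = y_var_norm * y_train_std_ ** 2                # variance scales quadratically
    y_std = np.sqrt(y_var)                                # so the std scales linearly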