|
21 | 21 | /* tslint:disable:max-line-length */ |
22 | 22 | /* tslint:disable:max-file-line-count */ |
23 | 23 |
|
| 24 | +import incrBinaryClassification = require( '@stdlib/ml/incr/binary-classification' ); |
24 | 25 | import incrkmeans = require( '@stdlib/ml/incr/kmeans' ); |
| 26 | +import incrSGDRegression = require( '@stdlib/ml/incr/sgd-regression' ); |
25 | 27 |
|
26 | 28 | /** |
27 | 29 | * Interface describing the `incr` namespace. |
28 | 30 | */ |
29 | 31 | interface Namespace { |
| 32 | + /** |
| 33 | + * Online learning for classification using stochastic gradient descent (SGD). |
| 34 | + * |
| 35 | + * ## Method |
| 36 | + * |
| 37 | + * The sub-gradient of the loss function is estimated for each datum and the classification model is updated incrementally, with a decreasing learning rate and regularization of the feature weights based on L2 regularization. |
| 38 | + * |
| 39 | + * ## References |
| 40 | + * |
| 41 | + * - Shalev-Shwartz, S., Singer, Y., Srebro, N., & Cotter, A. (2011). Pegasos: Primal estimated sub-gradient solver for SVM. Mathematical Programming, 127(1), 3–30. doi:10.1007/s10107-010-0420-4 |
| 42 | + * |
| 43 | + * @param options - options object |
| 44 | + * @param options.epsilon - insensitivity parameter (default: 0.1) |
| 45 | + * @param options.eta0 - constant learning rate (default: 0.02) |
| 46 | + * @param options.lambda - regularization parameter (default: 1e-3) |
| 47 | + * @param options.learningRate - string denoting the learning rate to use. Can be `constant`, `pegasos`, or `basic` (default: 'basic') |
| 48 | + * @param options.loss - string denoting the loss function to use. Can be `hinge`, `log`, `modifiedHuber`, `perceptron`, or `squaredHinge` (default: 'log') |
| 49 | + * @param options.intercept - boolean indicating whether to include an intercept (default: true) |
| 50 | + * @throws must provide valid options |
| 51 | + * @returns classification model |
| 52 | + * |
| 53 | + * @example |
 |       | 54 |  + * var incrBinaryClassification = require( '@stdlib/ml/incr/binary-classification' ); |
 |       | 55 |  + * |
 |       | 56 |  + * var accumulator = incrBinaryClassification({ |
 |       | 57 |  + *     'intercept': true, |
 |       | 58 |  + *     'lambda': 1e-5 |
 |       | 59 |  + * }); |
| 60 | + * |
| 61 | + * // Update model as observations come in: |
| 62 | + * var y = -1; |
| 63 | + * var x = [ 2.3, 1.0, 5.0 ]; |
| 64 | + * accumulator( x, y ); |
| 65 | + * |
| 66 | + * // Predict new observation: |
| 67 | + * var yHat = accumulator.predict( x ); |
| 68 | + * |
| 69 | + * // Retrieve coefficients: |
| 70 | + * var coefs = accumulator.coefs; |
| 71 | + */ |
| 72 | + incrBinaryClassification: typeof incrBinaryClassification; |
| 73 | + |
30 | 74 | /** |
31 | 75 | * Returns an accumulator function which incrementally partitions data into `k` clusters. |
32 | 76 | * |
@@ -92,6 +136,48 @@ interface Namespace { |
92 | 136 | * // returns {...} |
93 | 137 | */ |
94 | 138 | incrkmeans: typeof incrkmeans; |
| 139 | + |
| 140 | + /** |
| 141 | + * Online learning for regression using stochastic gradient descent (SGD). |
| 142 | + * |
| 143 | + * ## Method |
| 144 | + * |
| 145 | + * The sub-gradient of the loss function is estimated for each datum and the regression model is updated incrementally, with a decreasing learning rate and regularization of the feature weights based on L2 regularization. |
| 146 | + * |
| 147 | + * ## References |
| 148 | + * |
| 149 | + * - Shalev-Shwartz, S., Singer, Y., Srebro, N., & Cotter, A. (2011). Pegasos: Primal estimated sub-gradient solver for SVM. Mathematical Programming, 127(1), 3–30. doi:10.1007/s10107-010-0420-4 |
| 150 | + * |
| 151 | + * @param options - options object |
| 152 | + * @param options.epsilon - insensitivity parameter (default: 0.1) |
| 153 | + * @param options.eta0 - constant learning rate (default: 0.02) |
| 154 | + * @param options.lambda - regularization parameter (default: 1e-3) |
| 155 | + * @param options.learningRate - string denoting the learning rate to use. Can be `constant`, `pegasos`, or `basic` (default: 'basic') |
| 156 | + * @param options.loss - string denoting the loss function to use. Can be `squaredError`, `epsilonInsensitive`, or `huber` (default: 'squaredError') |
| 157 | + * @param options.intercept - boolean indicating whether to include an intercept (default: true) |
| 158 | + * @throws must provide valid options |
| 159 | + * @returns regression model |
| 160 | + * |
| 161 | + * @example |
 |       | 162 |  + * var incrSGDRegression = require( '@stdlib/ml/incr/sgd-regression' ); |
 |       | 163 |  + * |
 |       | 164 |  + * var accumulator = incrSGDRegression({ |
 |       | 165 |  + *     'intercept': true, |
 |       | 166 |  + *     'lambda': 1e-5 |
 |       | 167 |  + * }); |
| 168 | + * |
| 169 | + * // Update model as observations come in: |
| 170 | + * var y = 3.5; |
| 171 | + * var x = [ 2.3, 1.0, 5.0 ]; |
| 172 | + * accumulator( x, y ); |
| 173 | + * |
| 174 | + * // Predict new observation: |
| 175 | + * var yHat = accumulator.predict( x ); |
| 176 | + * |
| 177 | + * // Retrieve coefficients: |
| 178 | + * var coefs = accumulator.coefs; |
| 179 | + */ |
| 180 | + incrSGDRegression: typeof incrSGDRegression; |
95 | 181 | } |
96 | 182 |
|
97 | 183 | /** |
|
0 commit comments