Skip to content
This repository was archived by the owner on Dec 6, 2023. It is now read-only.

Commit 924f25c

Browse files
authored
execute all examples in gallery and prettify printed info (#183)
1 parent 16d0ed0 commit 924f25c

File tree

4 files changed

+9
-13
lines changed

4 files changed

+9
-13
lines changed

doc/conf.py

+1
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,7 @@
6868
sphinx_gallery_conf = {
6969
'examples_dirs': '../examples',
7070
'gallery_dirs': 'auto_examples',
71+
'filename_pattern' : '.py',
7172
'plot_gallery': 'True',
7273
}
7374

examples/document_classification_news20.py

+2-6
Original file line numberDiff line numberDiff line change
@@ -7,11 +7,7 @@
77
import numpy as np
88

99
from sklearn.datasets import fetch_20newsgroups_vectorized
10-
11-
try:
12-
from sklearn.model_selection import train_test_split
13-
except ImportError:
14-
from sklearn.cross_validation import train_test_split
10+
from sklearn.model_selection import train_test_split
1511

1612
from lightning.classification import CDClassifier
1713
from lightning.classification import LinearSVC
@@ -50,4 +46,4 @@
5046
for clf in clfs:
5147
print(clf.__class__.__name__)
5248
clf.fit(X_tr, y_tr)
53-
print(clf.score(X_te, y_te))
49+
print("score =", clf.score(X_te, y_te))

examples/plot_sparse_non_linear.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -3,18 +3,18 @@
33
Sparse non-linear classification
44
================================
55
6-
This example demonstrates how to use `CDClassifier` with L1 penalty to do
6+
This example demonstrates how to use :class:`lightning.classification.CDClassifier` with L1 penalty to do
77
sparse non-linear classification. The trick simply consists in fitting the
88
classifier with a kernel matrix (e.g., using an RBF kernel).
99
1010
There are a few interesting differences with standard kernel SVMs:
1111
1212
1. the kernel matrix does not need to be positive semi-definite (hence the
13-
expression "kernel matrix" above is an abuse of terminology)
13+
expression "kernel matrix" above is an abuse of terminology)
1414
1515
2. the number of "support vectors" will be typically smaller thanks to L1
16-
regularization and can be adjusted by the regularization parameter C (the
17-
smaller C, the fewer the support vectors)
16+
regularization and can be adjusted by the regularization parameter C (the
17+
smaller C, the fewer the support vectors)
1818
1919
3. the "support vectors" need not be located at the margin
2020
"""

examples/trace.py

+2-3
Original file line numberDiff line numberDiff line change
@@ -36,9 +36,8 @@ def rank(M, eps=1e-9):
3636
penalty="trace",
3737
multiclass=True)
3838

39+
print(f"{'alpha': <10}| {'score': <25}| {'rank': <5}")
3940
for alpha in (1e-3, 1e-2, 0.1, 0.2, 0.3):
40-
print("alpha=", alpha)
4141
clf.alpha = alpha
4242
clf.fit(X_train, y_train)
43-
print(clf.score(X_test, y_test))
44-
print(rank(clf.coef_))
43+
print(f"{alpha: <10}| {clf.score(X_test, y_test): <25}| {rank(clf.coef_): <5}")

0 commit comments

Comments
 (0)