Commit

MNT Update black to 23.3.0 (#26110)
adrinjalali authored Apr 6, 2023
1 parent 5b46d01 commit 893d5ac
Showing 183 changed files with 807 additions and 757 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -6,7 +6,7 @@ repos:
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/psf/black
rev: 22.3.0
rev: 23.3.0
hooks:
- id: black
- repo: https://github.com/pycqa/flake8
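Note: almost every hunk that follows is the same mechanical change. The 2023 style shipped in black 23.x removes a blank line that sits directly after a block opener (a def, for, or if line), which black 22.x left alone. A minimal before/after sketch, not taken from the repository:

def formatted_by_black_22(values):

    # black 22.3.0 left this blank line directly after the "def" line.
    return sorted(values)


def formatted_by_black_23(values):
    # black 23.3.0 removes it, which is the one-line deletion repeated below.
    return sorted(values)


if __name__ == "__main__":
    assert formatted_by_black_22([3, 1, 2]) == formatted_by_black_23([3, 1, 2])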
1 change: 0 additions & 1 deletion benchmarks/bench_20newsgroups.py
@@ -28,7 +28,6 @@
# Data

if __name__ == "__main__":

parser = argparse.ArgumentParser()
parser.add_argument(
"-e", "--estimators", nargs="+", required=True, choices=ESTIMATORS
1 change: 0 additions & 1 deletion benchmarks/bench_feature_expansions.py
@@ -35,7 +35,6 @@

fig, axes = plt.subplots(nrows=len(densities), ncols=1, figsize=(8, 10))
for density, ax in zip(densities, axes):

ax.plot(
dimensionalities,
csr_times[density] / trials,
2 changes: 0 additions & 2 deletions benchmarks/bench_glm.py
@@ -10,7 +10,6 @@


if __name__ == "__main__":

import matplotlib.pyplot as plt

n_iter = 40
@@ -22,7 +21,6 @@
dimensions = 500 * np.arange(1, n_iter + 1)

for i in range(n_iter):

print("Iteration %s of %s" % (i, n_iter))

n_samples, n_features = 10 * i + 3, 10 * i + 3
1 change: 0 additions & 1 deletion benchmarks/bench_isolation_forest.py
@@ -52,7 +52,6 @@ def print_outlier_ratio(y):

# Loop over all datasets for fitting and scoring the estimator:
for dat in datasets:

# Loading and vectorizing the data:
print("====== %s ======" % dat)
print("--- Fetching data...")
1 change: 0 additions & 1 deletion benchmarks/bench_kernel_pca_solvers_time_vs_n_components.py
@@ -82,7 +82,6 @@
r_time = np.empty((len(n_compo_range), n_iter)) * np.nan
# loop
for j, n_components in enumerate(n_compo_range):

n_components = int(n_components)
print("Performing kPCA with n_components = %i" % n_components)

1 change: 0 additions & 1 deletion benchmarks/bench_kernel_pca_solvers_time_vs_n_samples.py
@@ -83,7 +83,6 @@

# loop
for j, n_samples in enumerate(n_samples_range):

n_samples = int(n_samples)
print("Performing kPCA with n_samples = %i" % n_samples)

1 change: 0 additions & 1 deletion benchmarks/bench_mnist.py
@@ -223,7 +223,6 @@ def load_data(dtype=np.float32, order="F"):
)
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):

print(
"{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}".format(
name, train_time[name], test_time[name], error[name]
2 changes: 0 additions & 2 deletions benchmarks/bench_online_ocsvm.py
@@ -65,7 +65,6 @@ def print_outlier_ratio(y):
results_online = np.empty((len(datasets), n_axis + 5))

for dat, dataset_name in enumerate(datasets):

print(dataset_name)

# Loading datasets
@@ -133,7 +132,6 @@ def print_outlier_ratio(y):
gamma = 1 / n_features # OCSVM default parameter

for random_state in random_states:

print("random state: %s" % random_state)

X, y = shuffle(X, y, random_state=random_state)
1 change: 0 additions & 1 deletion benchmarks/bench_plot_fastkmeans.py
@@ -8,7 +8,6 @@


def compute_bench(samples_range, features_range):

it = 0
results = defaultdict(lambda: [])
chunk = 100
1 change: 0 additions & 1 deletion benchmarks/bench_plot_hierarchical.py
@@ -8,7 +8,6 @@


def compute_bench(samples_range, features_range):

it = 0
results = defaultdict(lambda: [])

1 change: 0 additions & 1 deletion benchmarks/bench_plot_lasso_path.py
@@ -15,7 +15,6 @@


def compute_bench(samples_range, features_range):

it = 0

results = defaultdict(lambda: [])
1 change: 0 additions & 1 deletion benchmarks/bench_plot_omp_lars.py
@@ -14,7 +14,6 @@


def compute_bench(samples_range, features_range):

it = 0

results = dict()
2 changes: 0 additions & 2 deletions benchmarks/bench_plot_randomized_svd.py
@@ -342,7 +342,6 @@ def scalable_frobenius_norm_discrepancy(X, U, s, V):


def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):

all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
@@ -398,7 +397,6 @@ def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):


def bench_b(power_list):

n_samples, n_features = 1000, 10000
data_params = {
"n_samples": n_samples,
1 change: 0 additions & 1 deletion benchmarks/bench_plot_svd.py
@@ -13,7 +13,6 @@


def compute_bench(samples_range, features_range, n_iter=3, rank=50):

it = 0

results = defaultdict(lambda: [])
50 changes: 28 additions & 22 deletions benchmarks/bench_sample_without_replacement.py
@@ -105,47 +105,53 @@ def bench_sample(sampling, n_population, n_samples):

###########################################################################
# Set Python core input
sampling_algorithm[
"python-core-sample"
] = lambda n_population, n_sample: random.sample(range(n_population), n_sample)
sampling_algorithm["python-core-sample"] = (
lambda n_population, n_sample: random.sample(range(n_population), n_sample)
)

###########################################################################
# Set custom automatic method selection
sampling_algorithm[
"custom-auto"
] = lambda n_population, n_samples, random_state=None: sample_without_replacement(
n_population, n_samples, method="auto", random_state=random_state
sampling_algorithm["custom-auto"] = (
lambda n_population, n_samples, random_state=None: sample_without_replacement(
n_population, n_samples, method="auto", random_state=random_state
)
)

###########################################################################
# Set custom tracking based method
sampling_algorithm[
"custom-tracking-selection"
] = lambda n_population, n_samples, random_state=None: sample_without_replacement(
n_population, n_samples, method="tracking_selection", random_state=random_state
sampling_algorithm["custom-tracking-selection"] = (
lambda n_population, n_samples, random_state=None: sample_without_replacement(
n_population,
n_samples,
method="tracking_selection",
random_state=random_state,
)
)

###########################################################################
# Set custom reservoir based method
sampling_algorithm[
"custom-reservoir-sampling"
] = lambda n_population, n_samples, random_state=None: sample_without_replacement(
n_population, n_samples, method="reservoir_sampling", random_state=random_state
sampling_algorithm["custom-reservoir-sampling"] = (
lambda n_population, n_samples, random_state=None: sample_without_replacement(
n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state,
)
)

###########################################################################
# Set custom reservoir based method
sampling_algorithm[
"custom-pool"
] = lambda n_population, n_samples, random_state=None: sample_without_replacement(
n_population, n_samples, method="pool", random_state=random_state
sampling_algorithm["custom-pool"] = (
lambda n_population, n_samples, random_state=None: sample_without_replacement(
n_population, n_samples, method="pool", random_state=random_state
)
)

###########################################################################
# Numpy permutation based
sampling_algorithm[
"numpy-permutation"
] = lambda n_population, n_sample: np.random.permutation(n_population)[:n_sample]
sampling_algorithm["numpy-permutation"] = (
lambda n_population, n_sample: np.random.permutation(n_population)[:n_sample]
)

###########################################################################
# Remove unspecified algorithm
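The larger rewrite above illustrates a second difference in the updated style: instead of splitting a long subscripted assignment target across lines, black now keeps the target on one line and wraps the right-hand side in parentheses. A rough, illustrative sketch (the registry and lambda below are invented, not part of the benchmark):

registry = {}

# Shape black 22.3.0 produced for an over-long assignment:
#
#     registry[
#         "identity-sample"
#     ] = lambda population, size: list(range(population))[:size]
#
# Shape black 23.3.0 produces, matching the hunk above:
registry["identity-sample"] = (
    lambda population, size: list(range(population))[:size]
)

if __name__ == "__main__":
    assert registry["identity-sample"](10, 3) == [0, 1, 2]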
1 change: 0 additions & 1 deletion benchmarks/bench_text_vectorizers.py
@@ -45,7 +45,6 @@ def f():
[CountVectorizer, TfidfVectorizer, HashingVectorizer],
[("word", (1, 1)), ("word", (1, 2)), ("char", (4, 4)), ("char_wb", (4, 4))],
):

bench = {"vectorizer": Vectorizer.__name__}
params = {"analyzer": analyzer, "ngram_range": ngram_range}
bench.update(params)
1 change: 0 additions & 1 deletion benchmarks/bench_tree.py
@@ -60,7 +60,6 @@ def bench_scikit_tree_regressor(X, Y):


if __name__ == "__main__":

print("============================================")
print("Warning: this is going to take a looong time")
print("============================================")
7 changes: 2 additions & 5 deletions benchmarks/bench_tsne_mnist.py
@@ -129,8 +129,7 @@ def sanitize(filename):
try:
from bhtsne.bhtsne import run_bh_tsne
except ImportError as e:
raise ImportError(
"""\
raise ImportError("""\
If you want comparison with the reference implementation, build the
binary from source (https://github.com/lvdmaaten/bhtsne) in the folder
benchmarks/bhtsne and add an empty `__init__.py` file in the folder:
@@ -140,8 +139,7 @@ def sanitize(filename):
$ g++ sptree.cpp tsne.cpp tsne_main.cpp -o bh_tsne -O2
$ touch __init__.py
$ cd ..
"""
) from e
""") from e

def bhtsne(X):
"""Wrapper for the reference lvdmaaten/bhtsne implementation."""
@@ -160,7 +158,6 @@ def bhtsne(X):
methods.append(("lvdmaaten/bhtsne", bhtsne))

if args.profile:

try:
from memory_profiler import profile
except ImportError as e:
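The bench_tsne_mnist.py hunks also show a smaller change: after this update, a triple-quoted string passed as the only argument is kept hugging the call parentheses instead of being wrapped onto separate lines. A shortened, illustrative sketch (the function and message below are made up):

def require_bhtsne():
    # Shape black 22.3.0 produced:
    #
    #     raise ImportError(
    #         """\
    #     build bhtsne from source and add an __init__.py, then retry
    #     """
    #     ) from None
    #
    # Shape in the updated diff above: the string hugs the parentheses.
    raise ImportError("""\
build bhtsne from source and add an __init__.py, then retry
""") from None


if __name__ == "__main__":
    try:
        require_bhtsne()
    except ImportError as exc:
        print(exc)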
1 change: 0 additions & 1 deletion build_tools/generate_authors_table.py
@@ -172,7 +172,6 @@ def generate_list(contributors):


if __name__ == "__main__":

(
core_devs,
emeritus,