Normalize all std imports to use import std / xxx
This also removes a few unnecessary imports of the complex and math modules (which are automatically exported by arraymancer). The change touches a lot of functions, but each individual edit is minimal and simple (basically changing things like `import unittest, os` into `import std / [unittest, os]` and so on).
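As a minimal sketch of the pattern applied throughout (the module list here is hypothetical and not taken from any specific file; it assumes arraymancer is installed):

```nim
# Before (old style):
#   import unittest, os, math
#   import arraymancer
#
# After (normalized): stdlib modules are grouped under the std namespace, and
# the explicit math import is dropped because arraymancer re-exports math.
import std / [unittest, os]
import arraymancer

# math symbols stay available through arraymancer's re-export
echo sqrt(2.0)
```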
AngelEzquerra committed Mar 17, 2024
1 parent ddd24a3 commit 4fda79e
Showing 112 changed files with 138 additions and 146 deletions.
2 changes: 1 addition & 1 deletion benchmarks/euler_nativearray.nim
@@ -1,4 +1,4 @@
-import math, times, strformat
+import std / [math, times, strformat]

const
dz = 0.1
2 changes: 1 addition & 1 deletion benchmarks/euler_tensor.nim
@@ -1,4 +1,4 @@
-import math, times, strformat
+import std / [times, strformat]
import ../src/arraymancer

proc getTime(): float =
2 changes: 1 addition & 1 deletion benchmarks/euler_tensor_optim.nim
@@ -1,4 +1,4 @@
-import math, times, strformat
+import std / [times, strformat]
import ../src/arraymancer

proc getTime(): float =
3 changes: 2 additions & 1 deletion benchmarks/ex02_mnist.nim
@@ -1,4 +1,5 @@
-import ../src/arraymancer, random
+import ../src/arraymancer
+import std / random

# Make the results reproducible by initializing a random seed
randomize(42)
2 changes: 1 addition & 1 deletion benchmarks/implementation/iteration.nim
@@ -1,6 +1,6 @@
import ../../src/arraymancer
import ../../src/tensor/backend/[openmp, memory_optimization_hints]
-import times
+import std / times

const
dz = 0.01
4 changes: 3 additions & 1 deletion benchmarks/implementation/logsumexp.nim
@@ -1,4 +1,6 @@
-import times, ../../src/arraymancer, math, sequtils
+import std / times
+import ../../src/arraymancer
+import std / sequtils

# Reference python
# import numpy as np
2 changes: 1 addition & 1 deletion benchmarks/implementation/proc_method_closure_bench.nim
@@ -1,4 +1,4 @@
-import times
+import std / times

type FooBase = ref object {.inheritable.}
dummy: int
3 changes: 2 additions & 1 deletion benchmarks/implementation/stable_sigmoid_bench.nim
@@ -1,4 +1,5 @@
-import times, ../../src/arraymancer, math
+import std / times
+import ../../src/arraymancer

# The goal is to test the speed of various sigmoid implementation
# Some are numericall stable for positive, negative or both value
4 changes: 2 additions & 2 deletions benchmarks/implementation/triot.nim
@@ -19,7 +19,7 @@


import ../../src/tensor/backend/metadataArray
-import macros
+import std / macros

proc shape_to_strides*(shape: MetadataArray): MetadataArray {.noSideEffect.} =
var accum = 1
@@ -210,7 +210,7 @@ proc warmup() =
################################################################################################
#### Bench

-import times
+import std / times


let a = [2, 3, 4].toMetadataArray
2 changes: 1 addition & 1 deletion benchmarks/integer_matmul.nim
@@ -1,4 +1,4 @@
-import os, strutils, random
+import std / [os, strutils, random]
import ../src/arraymancer

{.passC: "-march=native" .}
2 changes: 1 addition & 1 deletion benchmarks/kostya_matmul.nim
@@ -1,6 +1,6 @@
# From: https://github.com/kostya/benchmarks

-import os, strutils
+import std / [os, strutils]
import ../src/arraymancer

proc matgen(n: int): auto =
2 changes: 1 addition & 1 deletion benchmarks/kostya_matmul_higher_order_template.nim
@@ -1,6 +1,6 @@
# From: https://github.com/kostya/benchmarks

-import os, strutils, sequtils
+import std / [os, strutils, sequtils]
import ../src/arraymancer

proc matgen(n: int): auto =
2 changes: 1 addition & 1 deletion benchmarks/kostya_matmul_list_comprehension.nim
@@ -1,6 +1,6 @@
# From: https://github.com/kostya/benchmarks

-import os, strutils, sugar
+import std / [os, strutils, sugar]
import ../src/arraymancer

proc matgen(n: int): auto =
2 changes: 1 addition & 1 deletion benchmarks/kostya_matmul_mitems.nim
@@ -1,6 +1,6 @@
# From: https://github.com/kostya/benchmarks

-import os, strutils
+import std / [os, strutils]
import ../src/arraymancer

proc divmod[T: SomeInteger](n: T, b: T): (T, T) =
2 changes: 1 addition & 1 deletion benchmarks/randomized_svd.nim
@@ -1,5 +1,5 @@
import ../src/arraymancer
-import times, strformat
+import std / [times, strformat]

# Compile with -d:release -d:danger (optionally -d:openmp)
6 changes: 3 additions & 3 deletions docs/docs.nim
@@ -1,14 +1,14 @@
-import macros, strformat, strutils, sequtils, sets, tables, algorithm
+import std / [macros, strformat, strutils, sequtils, sets, tables, algorithm]

-from os import parentDir, getCurrentCompilerExe, DirSep, extractFilename, `/`, setCurrentDir
+from std / os import parentDir, getCurrentCompilerExe, DirSep, extractFilename, `/`, setCurrentDir

# NOTE:
# for some time on devel 1.3.x `paramCount` and `paramStr` had to be imported
# os, because they were removed for nimscript. This was reverted in:
# https://github.com/nim-lang/Nim/pull/14658
# For `nimdoc` we still have to import those from `os`!
when defined(nimdoc):
-from os import getCurrentDir, paramCount, paramStr
+from std / os import getCurrentDir, paramCount, paramStr

#[
This file is a slightly modified version of the same file of `nimterop`:
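The same normalization also covers selective imports, as the `docs.nim` hunk above shows; a tiny illustrative sketch (the calls below are hypothetical, not taken from the repository):

```nim
# from-style imports move under the std namespace as well
from std / os import parentDir, extractFilename

echo parentDir("docs/docs.nim"), " ", extractFilename("docs/docs.nim")
```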
3 changes: 2 additions & 1 deletion examples/ex04_fizzbuzz_interview_cheatsheet.nim
@@ -8,7 +8,8 @@
# if it's divisible by 5 print "buzz", and if it's divisible by 15 print "fizzbuzz".

# Let's start with standard imports
-import ../src/arraymancer, math, strformat
+import ../src/arraymancer
+import std / strformat

# We want to input a number and output the correct "fizzbuzz" representation
# ideally the input is a represented by a vector of real values between 0 and 1
3 changes: 2 additions & 1 deletion examples/ex07_save_load_model.nim
@@ -1,4 +1,5 @@
-import ../src/arraymancer, strformat, os
+import ../src/arraymancer
+import std / [strformat, os]

#[
A fully-connected ReLU network with one hidden layer, trained to predict y from x
2 changes: 1 addition & 1 deletion src/arraymancer/autograd/autograd_common.nim
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import typetraits, macros
+import std / [typetraits, macros]

# ############################################################
#
4 changes: 2 additions & 2 deletions src/arraymancer/datasets/imdb.nim
@@ -13,8 +13,8 @@
# limitations under the License.


-import httpclient, strformat, os, strutils,
-./util, ../tensor,
+import std / [httpclient, strformat, os, strutils]
+import ./util, ../tensor,
untar

const
4 changes: 2 additions & 2 deletions src/arraymancer/datasets/mnist.nim
@@ -52,8 +52,8 @@
# The labels values are 0 to 9.


-import streams, os, httpClient, strformat, sugar, sequtils,
-../tensor, ../io/io_stream_readers, ./util,
+import std / [streams, os, httpClient, strformat, sugar, sequtils]

Check failure on line 55 in src/arraymancer/datasets/mnist.nim (GitHub Actions / linux-amd64-c: version-1-6, version-2-0, devel): cannot open file: std/httpClient

+import ../tensor, ../io/io_stream_readers, ./util,
zip/gzipfiles

type Mnist = tuple[
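The failure noted above suggests that the `std/`-prefixed form resolves the module file by its exact name; the standard library file is `httpclient.nim` (all lowercase), so a presumable follow-up fix, which is an assumption and not part of this commit, would be:

```nim
# Presumable fix (assumption, not part of this commit): use the lowercase
# module name that matches the stdlib file std/httpclient.nim
import std / [streams, os, httpclient, strformat, sugar, sequtils]
```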
2 changes: 1 addition & 1 deletion src/arraymancer/datasets/util.nim
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
+import std / os

proc get_cache_home(): string =
result = getEnv("XDG_CACHE_HOME", getHomeDir() / ".cache")
4 changes: 2 additions & 2 deletions src/arraymancer/io/io_csv.nim
@@ -13,8 +13,8 @@
# limitations under the License.


-import os, parsecsv, streams, strutils, sequtils, algorithm,
-../tensor
+import std / [os, parsecsv, streams, strutils, sequtils, algorithm]
+import ../tensor
from memfiles as mf import nil

proc countLinesAndCols(file: string, sep: char, quote: char,
2 changes: 1 addition & 1 deletion src/arraymancer/laser/openmp.nim
@@ -396,7 +396,7 @@ template omp_taskloop*(
for `index`{.inject.} in `||`(0, length-1, omp_annotation):
block: body

-import macros
+import std / macros
macro omp_flush*(variables: varargs[untyped]): untyped =
var listvars = "("
for i, variable in variables:
2 changes: 1 addition & 1 deletion src/arraymancer/laser/tensor/datatypes.nim
@@ -11,7 +11,7 @@ import

when NimVersion < "1.1.0":
# For distinctBase
-import sugar
+import std / sugar

when not defined(nimHasCursor):
{.pragma: cursor.}
14 changes: 7 additions & 7 deletions src/arraymancer/laser/tensor/initialization.nim
@@ -3,19 +3,19 @@
# Distributed under the Apache v2 License (license terms are at http://www.apache.org/licenses/LICENSE-2.0).
# This file may not be copied, modified, or distributed except according to those terms.

-import std/complex
+import std / complex

import
../openmp,
../compiler_optim_hints,
../strided_iteration/foreach,
../dynamic_stack_arrays,
../private/nested_containers,
-./datatypes,
-# Standard library
-typetraits, sequtils,
-# Third-party
-nimblas
+./datatypes
+# Standard library
+import std / [typetraits, sequtils]
+# Third-party
+import nimblas

when (NimMajor, NimMinor) < (1, 4):
import ../../std_version_types
@@ -289,7 +289,7 @@ func item*[T_IN, T_OUT](t: Tensor[T_IN], _: typedesc[T_OUT]): T_OUT =
# When the input and the output types are Complex, we need to find
# the "base" type of the output type (e.g. float32 or float64),
# and then convert the real and imaginary parts of the input value
-# into the output base type before creating the output complex type
+# into the output base type before creating the output complex type
type TT = typeof(
block:
var tmp: T_OUT
5 changes: 2 additions & 3 deletions src/arraymancer/linear_algebra/algebra.nim
@@ -2,9 +2,8 @@
# Distributed under the Apache v2 License (license terms are at http://www.apache.org/licenses/LICENSE-2.0).
# This file may not be copied, modified, or distributed except according to those terms.

-import std/complex
-import
-../tensor
+import std / complex
+import ../tensor

proc pinv*[T: SomeFloat](A: Tensor[T], rcond = 1e-15): Tensor[T] =
## Compute the (Moore-Penrose) pseudo-inverse of a matrix.
2 changes: 1 addition & 1 deletion src/arraymancer/linear_algebra/helpers/overload.nim
@@ -1,4 +1,4 @@
-import macros
+import std / macros

# Alias / overload generator
# --------------------------------------------------------------------------------------
2 changes: 1 addition & 1 deletion src/arraymancer/linear_algebra/special_matrices.nim
@@ -4,7 +4,7 @@

import ../tensor
import ./helpers/triangular
-import std/[sequtils, bitops]
+import std / [sequtils, bitops]

proc hilbert*(n: int, T: typedesc[SomeFloat]): Tensor[T] =
## Generates an Hilbert matrix of shape [N, N]
3 changes: 1 addition & 2 deletions src/arraymancer/ml/clustering/dbscan.nim
@@ -1,5 +1,4 @@
-import deques
-import sequtils
+import std / [deques, sequtils]

import ../../spatial/[distances, neighbors],
../../tensor
2 changes: 1 addition & 1 deletion src/arraymancer/nn_primitives/nnp_activation.nim
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import math
+import std / math
import ../tensor,
./private/p_activation
2 changes: 1 addition & 1 deletion src/arraymancer/nn_primitives/private/p_activation.nim
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import math
+import std / math

proc sigmoid*[T: SomeFloat](x: T): T {.inline, noSideEffect.} =
1 / (1 + exp(-x))
2 changes: 1 addition & 1 deletion src/arraymancer/private/ast_utils.nim
@@ -14,7 +14,7 @@

# Tools to manipulate Nim Abstract Syntax Tree

-import macros
+import std / macros

proc hasType*(x: NimNode, t: static[string]): bool {. compileTime .} =
## Compile-time type checking
3 changes: 2 additions & 1 deletion src/arraymancer/private/deprecate.nim
@@ -12,7 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import macros, ./ast_utils
+import std / macros
+import ./ast_utils

# Handle deprecation and replacement
# --------------------------------------------------------------------------------------
2 changes: 1 addition & 1 deletion src/arraymancer/private/functional.nim
@@ -13,7 +13,7 @@
# limitations under the License.

# Functional programming and iterator tooling
-import sequtils
+import std / sequtils

when not defined(nimHasEffectsOf):
{.pragma: effectsOf.}
3 changes: 1 addition & 2 deletions src/arraymancer/spatial/distances.nim
@@ -1,5 +1,4 @@
-import math
-import sets
+import std / [math, sets]

import ../tensor
2 changes: 1 addition & 1 deletion src/arraymancer/spatial/kdtree.nim
@@ -1,4 +1,4 @@
-import math, typetraits
+import std / [math, typetraits]

import ../tensor
import ./distances
2 changes: 1 addition & 1 deletion src/arraymancer/stats/distributions.nim
@@ -13,7 +13,7 @@
# limitations under the License.

import ../tensor
-import math
+import std / math

proc gauss*[T](x, mean, sigma: T, norm = false): float =
## Returns a value of the gaussian distribution described by `mean`, `sigma`
4 changes: 2 additions & 2 deletions src/arraymancer/tensor/aggregate.nim
@@ -23,7 +23,7 @@ import ./data_structure,
./accessors_macros_syntax,
./algorithms,
./private/p_empty_tensors
-import std/[math, macros]
+import std / [math, macros]
import complex except Complex64, Complex32

# ### Standard aggregate functions
@@ -459,7 +459,7 @@ proc unwrap_period*[T: SomeNumber](t: Tensor[T], discont: T = -1, axis = -1, per
temp[_] = tAxis +. pAxis

when (NimMajor, NimMinor, NimPatch) > (1, 6, 0):
-import std/atomics
+import std / atomics
proc nonzero*[T](arg: Tensor[T]): Tensor[int] =
## Returns the indices, which are non zero as a `Tensor[int]`.
##
2 changes: 1 addition & 1 deletion src/arraymancer/tensor/complex.nim
@@ -2,7 +2,7 @@
# Distributed under the Apache v2 License (license terms are at http://www.apache.org/licenses/LICENSE-2.0).
# This file may not be copied, modified, or distributed except according to those terms.

-import std/complex
+import std / complex
import
./data_structure,
./accessors,
2 changes: 1 addition & 1 deletion src/arraymancer/tensor/einsum.nim
@@ -1,4 +1,4 @@
-import macros, sequtils, sets, algorithm
+import std / [macros, sequtils, sets, algorithm]
import ../private/ast_utils
import ./shapeshifting
# Note: importing shapeshifting_cuda will trigger a Nim inference bug
2 changes: 1 addition & 1 deletion src/arraymancer/tensor/exporting.nim
@@ -14,7 +14,7 @@

import ./data_structure
import ./accessors
-import std/sequtils
+import std / sequtils

proc toRawSeq*[T](t:Tensor[T]): seq[T] {.noSideEffect, deprecated: "This proc cannot be reimplemented in a backward compatible way.".} =
## Convert a tensor to the raw sequence of data.