Commit 92fe753

Merge pull request #28 from JuliaMath/rename
VML replaced with IntelVectorMath
2 parents 6371fab + 6178bc9 · commit 92fe753

File tree

10 files changed, +78 -70 lines changed


.travis.yml

Lines changed: 3 additions & 3 deletions
````diff
@@ -24,6 +24,6 @@ before_script:
 
 script:
   #
-  - export JL_PKG=VML
-  - julia --color=yes -e "if VERSION < v\"0.7.0-DEV.5183\"; Pkg.clone(pwd()); Pkg.build(\"VML\"); else using Pkg; if VERSION >= v\"1.1.0-rc1\"; Pkg.build(\"VML\"; verbose=true); else Pkg.build(\"VML\"); end; end"
-  - julia --check-bounds=yes --color=yes -e "if VERSION < v\"0.7.0-DEV.5183\"; Pkg.test(\"VML\", coverage=true); else using Pkg; Pkg.test(coverage=true); end"
+  - export JL_PKG=IntelVectorMath
+  - julia --color=yes -e "if VERSION < v\"0.7.0-DEV.5183\"; Pkg.clone(pwd()); Pkg.build(\"IntelVectorMath\"); else using Pkg; if VERSION >= v\"1.1.0-rc1\"; Pkg.build(\"IntelVectorMath\"; verbose=true); else Pkg.build(\"IntelVectorMath\"); end; end"
+  - julia --check-bounds=yes --color=yes -e "if VERSION < v\"0.7.0-DEV.5183\"; Pkg.test(\"IntelVectorMath\", coverage=true); else using Pkg; Pkg.test(coverage=true); end"
````
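
The embedded one-liners are dense; expanded into plain Julia, the build command's logic reads as follows (a readability sketch only, not part of the CI file):

```julia
# Sketch: the logic inside the `julia --color=yes -e "..."` build one-liner.
if VERSION < v"0.7.0-DEV.5183"
    # Pre-Pkg3 Julia: Pkg is available without `using Pkg`.
    Pkg.clone(pwd())
    Pkg.build("IntelVectorMath")
else
    using Pkg
    if VERSION >= v"1.1.0-rc1"
        Pkg.build("IntelVectorMath"; verbose=true)  # `verbose` needs >= 1.1
    else
        Pkg.build("IntelVectorMath")
    end
end
```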

LICENSE.md

Lines changed: 1 addition & 1 deletion
````diff
@@ -1,4 +1,4 @@
-The VML.jl package is licensed under the MIT "Expat" License:
+The IntelVectorMath.jl package is licensed under the MIT "Expat" License:
 
 > Copyright (c) 2014: Simon Kornblith.
 >
````

Project.toml

Lines changed: 1 addition & 1 deletion
````diff
@@ -1,4 +1,4 @@
-name = "VML"
+name = "IntelVectorMath"
 uuid = "c8ce9da6-5d36-5c03-b118-5a70151be7bc"
 version = "0.2.0"
 
````

README.md

Lines changed: 40 additions & 35 deletions
````diff
@@ -1,42 +1,47 @@
-# VML
-[![Build Status](https://travis-ci.com/Crown421/VML.jl.svg?branch=master)](https://travis-ci.com/Crown421/VML.jl)
-[![Build status](https://ci.appveyor.com/api/projects/status/btdduqfsxux8fhsr?svg=true)](https://ci.appveyor.com/project/Crown421/vml-jl)
+# IntelVectorMath.jl (formerly VML.jl)
+[![Build Status](https://travis-ci.com/JuliaMath/IntelVectorMath.jl.svg?branch=master)](https://travis-ci.com/JuliaMath/IntelVectorMath.jl)
+[![Build status](https://ci.appveyor.com/api/projects/status/btdduqfsxux8fhsr?svg=true)](https://ci.appveyor.com/project/Crown421/IntelVectorMath-jl)
 
-This package provides bindings to the Intel Vector Math Library for
-arithmetic and transcendental functions. Especially for large vectors it is often substantially faster than broadcasting Julia's built-in functions.
+This package provides bindings to the Intel MKL [Vector Mathematics Functions](https://software.intel.com/en-us/node/521751).
+This is often substantially faster than broadcasting Julia's built-in functions, especially when applying a transcendental function over a large array.
+Until Julia 0.6 the package was registered as `VML.jl`.
 
 ## Basic install
 
-To use VML.jl, you must have the shared libraries of the Intel Vector Math Library avilable on your system.
-The easiest option is to use [MKL.jl](https://github.com/JuliaComputing/MKL.jl) via
+To use IntelVectorMath.jl, you must have the shared libraries of the Intel Vector Math Library available on your system.
+The easiest option is to use [MKL.jl](https://github.com/JuliaComputing/MKL.jl) via
 ```julia
 julia> ] add https://github.com/JuliaComputing/MKL.jl.git
 ```
 Alternatively you can install MKL directly [from intel](https://software.intel.com/en-us/mkl/choose-download).
 
 Note that intel MKL has a separate license, which you may want to check for commercial projects (see [FAQ](https://software.intel.com/en-us/mkl/license-faq)).
 
-To install VML.jl run
+To install IntelVectorMath.jl run
 ```julia
-julia> ] add https://github.com/JuliaMath/VML.jl
+julia> ] add https://github.com/JuliaMath/IntelVectorMath.jl
 ```
 
-## Using VML
-After loading `VML`, you have the supported function listed below available to call, i.e. `VML.sin(rand(100))`. This should provide a significant speed-up over broadcasting the Base functions.
+## Using IntelVectorMath
+After loading `IntelVectorMath`, you have the supported functions listed below, for example `IntelVectorMath.sin(rand(100))`. These should provide a significant speed-up over broadcasting the Base functions.
+Since the package name is quite long, an alias `IVM` is also exported to allow `IVM.sin(rand(100))` after `using` the package.
+If you `import` the package, you can add this alias via `const IVM = IntelVectorMath`. Equally, you can replace `IVM` with another alias of your choice.
+
+#### Example
 ```julia
-julia> using VML, BenchmarkTools
+julia> using IntelVectorMath, BenchmarkTools
 
 julia> a = randn(10^4);
 
 julia> @btime sin.($a); # apply Base.sin to each element
   102.128 μs (2 allocations: 78.20 KiB)
 
-julia> @btime VML.sin($a); # apply VML.sin to the whole array
+julia> @btime IVM.sin($a); # apply IVM.sin to the whole array
   20.900 μs (2 allocations: 78.20 KiB)
 
 julia> b = similar(a);
 
-julia> @btime VML.sin!(b, a); # in-place version
+julia> @btime IVM.sin!(b, a); # in-place version
   20.008 μs (0 allocations: 0 bytes)
 ```
````
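
For reference, the alias mechanism the new README describes boils down to an ordinary constant binding; `MyIVM` below is a hypothetical name chosen purely for illustration:

```julia
using IntelVectorMath       # exports the IVM alias automatically

IVM.sin(rand(100))          # identical to IntelVectorMath.sin(rand(100))

# With `import` nothing is exported, so define your own alias:
import IntelVectorMath
const MyIVM = IntelVectorMath   # hypothetical alias of your choice
MyIVM.sin(rand(100))
```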

````diff
@@ -50,7 +55,7 @@ julia> @btime sin($a);
 julia> ans ≈ sin.(a)
 true
 ```
-Calling `sin` on an array now calls the a VML function, while its action on scalars is unchanged.
+Calling `sin` on an array now calls an IntelVectorMath function, while its action on scalars is unchanged.
 
 #### Note:
 
````

````diff
@@ -61,47 +66,47 @@ julia> exp(ones(2,2))
 4.19453 3.19453
 3.19453 4.19453
 
-julia> VML.exp(ones(2,2))
+julia> IVM.exp(ones(2,2))
 2×2 Array{Float64,2}:
 2.71828 2.71828
 2.71828 2.71828
 
 julia> ans == exp.(ones(2,2))
 true
 ```
-If your code, or any code you call, uses matrix exponentiation, then `@overload exp` may silently lead to incorrect results. This caution applies to all trigonometric functions, too, since they have matrix forms defined by matrix exponential.
+If your code, or any code you call, uses matrix exponentiation, then `@overload exp` may silently lead to incorrect results. This caution applies to all trigonometric functions, too, since they have matrix forms defined by matrix exponentials.
 
 ### Accuracy
 
-By default, VML uses `VML_HA` mode, which corresponds to an accuracy of
+By default, IntelVectorMath uses `VML_HA` mode, which corresponds to an accuracy of
 <1 ulp, matching the accuracy of Julia's built-in openlibm
 implementation, although the exact results may be different. To specify
 low accuracy, use `vml_set_accuracy(VML_LA)`. To specify enhanced
 performance, use `vml_set_accuracy(VML_EP)`. More documentation
 regarding these options is available on
-[Intel's website](http://software.intel.com/sites/products/documentation/hpc/mkl/vml/vmldata.htm).
+[Intel's website](http://software.intel.com/sites/products/documentation/hpc/mkl/IntelVectorMath/vmldata.htm).
 
 ## Performance
 (These results are currently outdated and will be updated in due course)
-![VML Performance Comparison](/benchmark/performance.png)
+![IntelVectorMath Performance Comparison](/benchmark/performance.png)
 
-![VML Complex Performance Comparison](/benchmark/performance_complex.png)
+![IntelVectorMath Complex Performance Comparison](/benchmark/performance_complex.png)
 
 Tests were performed on an Intel(R) Core(TM) i7-3930K CPU. Error bars
 are 95% confidence intervals based on 25 repetitions of each test with
 a 1,000,000 element vector. The dashed line indicates equivalent
-performance for VML versus the implementations in Base. Both Base and
-VML use only a single core when performing these benchmarks.
+performance for IntelVectorMath versus the implementations in Base. Both Base and
+IntelVectorMath use only a single core when performing these benchmarks.
 
 ## Supported functions
 
-VML.jl supports the following functions, most for Float32 and
+IntelVectorMath.jl supports the following functions, most for Float32 and
 Float64, while some also take complex numbers.
 
 ### Unary functions
 
 Allocating forms have signature `f(A)`. Mutating forms have signatures
-`f!(A)` (in place) and `f!(out, A)` (out of place). The last 9 functions have been moved from Base to `SpecialFunctions.jl` or have no Base equivalent.
+`f!(A)` (in place) and `f!(out, A)` (out of place). The last 9 functions have been moved from Base to `SpecialFunctions.jl` or have no Base equivalent.
 
 Allocating | Mutating
 -----------|---------
````
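
For reference, the accuracy modes described in this hunk are switched at runtime; a minimal sketch, assuming `vml_set_accuracy` and the mode constants are exported as the test suite later in this diff suggests:

```julia
using IntelVectorMath

vml_set_accuracy(VML_LA)   # low accuracy, higher throughput
IVM.sin(randn(1000))       # subsequent calls use LA mode

vml_set_accuracy(VML_EP)   # enhanced performance, lowest accuracy
vml_set_accuracy(VML_HA)   # restore the high-accuracy default (<1 ulp)
```
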
````diff
@@ -143,7 +148,7 @@ Allocating | Mutating
 ### Binary functions
 
 Allocating forms have signature `f(A, B)`. Mutating forms have
-signature `f!(out, A, B)`.
+signature `f!(out, A, B)`.
 
 Allocating | Mutating
 -----------|---------
````
````diff
@@ -154,21 +159,21 @@ Allocating | Mutating
 
 
 ## Next steps
-Next steps for this package
+Next steps for this package
 * [x] Windows support
 * [x] Basic Testing
 * [x] Avoiding overloading base and optional overload function
-* [ ] Updating Benchmarks
 * [x] Travis and AppVeyor testing
 * [x] Adding CIS function
+* [ ] Updating Benchmarks
 * [ ] Add tests for mutating functions
+* [ ] Add test for using standalone MKL
 
 
+## Advanced
+IntelVectorMath.jl works via Libdl which loads the relevant shared libraries. Libdl automatically finds the relevant libraries if the location of the binaries has been added to the system search paths.
+This is already taken care of if you use MKL.jl, but the stand-alone install may require you to source `mklvars.sh`. The default command on Mac and Ubuntu is `source /opt/intel/mkl/bin/mklvars.sh intel64`. You may want to add this to your `.bashrc`.
+Adding a new `*.conf` file in `/etc/ld.so.conf.d` also works, as the `intel-mkl-slim` package in the AUR does automatically.
 
-## Advanced
-VML.jl works via Libdl which loads the relevant shared libraries. Libdl automatically finds the relevant libraries if the location of the binaries has been added to the system search paths.
-This already taken care of if you use MKL.jl, but the stand-alone may require you to source `mklvars.sh`. The default command on Mac and Ubuntu is `source /opt/intel/mkl/bin/mklvars.sh intel64`. You may want to add this to your `.bashrc`.
-Adding a new `*.conf` file in `/etc/ld.so.conf.d` also works, as the `intel-mkl-slim` package in the AUR does automatically.
-
-Further, VML.jl uses [CpuId.jl](https://github.com/m-j-w/CpuId.jl) to detect if your processor supports the newer `avx2` instructions, and if not defaults to `libmkl_vml_avx`. If your system does not have AVX this package will currently not work for you.
-If the CPU feature detection does not work for you, please open an issue.
+Further, IntelVectorMath.jl uses [CpuId.jl](https://github.com/m-j-w/CpuId.jl) to detect if your processor supports the newer `avx2` instructions, and if not defaults to `libmkl_vml_avx`. If your system does not have AVX this package will currently not work for you.
+If the CPU feature detection does not work for you, please open an issue.
````
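
As a hedged illustration of the Libdl mechanism the new Advanced section describes (the library basenames below are assumptions based on the `libmkl_vml_avx` mention; adjust for your MKL install):

```julia
using Libdl

# find_library returns "" when the loader cannot locate any candidate;
# sourcing mklvars.sh or adding an ld.so.conf.d entry fixes the search path.
lib = Libdl.find_library(["libmkl_vml_avx2", "libmkl_vml_avx"])
isempty(lib) && @warn "MKL VML shared library not found on the search path"
```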

benchmark/benchmark.jl

Lines changed: 6 additions & 6 deletions
````diff
@@ -1,4 +1,4 @@
-using VML
+using IntelVectorMath
 using Distributions, Statistics, BenchmarkTools # for benchmark
 using Plots # for plotting
 using JLD2, FileIO # to save file
````
````diff
@@ -32,7 +32,7 @@ fns = [[x[1:2] for x in base_unary_real];
 """
     bench(fns, input)
 
-benchmark function for VML.jl. Calls both Base and VML functions and stores the benchmarks in two nested Dict. First layer specifies type, and second layer specifies the function name. The result is a Tuple, 1st element being benchmark for Base/SpecialFunctions and 2nd element being for VML.
+benchmark function for IntelVectorMath.jl. Calls both Base and IntelVectorMath functions and stores the benchmarks in two nested Dicts. The first layer specifies the type, and the second layer specifies the function name. The result is a Tuple, the 1st element being the benchmark for Base/SpecialFunctions and the 2nd element being for IntelVectorMath.
 
 # Examples
 ```julia
````
````diff
@@ -41,14 +41,14 @@ input = Dict( Float64 => [(rand(1000)); (rand(1000), rand(1000)); (rand(1000))])
 times = bench(fns, input)
 
 times[Float64][:acos][1] # Base.acos benchmark for Float64
-times[Float64][:acos][2] # VML.acos benchmark for Float64
+times[Float64][:acos][2] # IntelVectorMath.acos benchmark for Float64
 ```
 """
 function bench(fns, input)
     Dict(t => begin
         Dict( fn[2] => begin
             base_fn = eval(:($(fn[1]).$(fn[2])))
-            vml_fn = eval(:(VML.$(fn[2])))
+            vml_fn = eval(:(IntelVectorMath.$(fn[2])))
             println("benchmarking $vml_fn for type $t")
             timesBase = @benchmark $base_fn.($inp...)
             timesVML = @benchmark $vml_fn($inp...)
````

````diff
@@ -137,8 +137,8 @@ function plotBench()
     # end
     xlims!(0, length(fns) + 1)
     xticks!(1:length(fns)+1, fname, rotation = 70, fontsize = 10)
-    title!("VML Performance for array of size $NVALS")
-    ylabel!("Relative Speed (VML/Base)")
+    title!("IntelVectorMath Performance for array of size $NVALS")
+    ylabel!("Relative Speed (IntelVectorMath/Base)")
     hline!([1], line=(4, :dash, 0.6, [:green]), labels = 1)
     savefig("performance$(complex ? "_complex" : "").png")
````

src/VML.jl renamed to src/IntelVectorMath.jl

Lines changed: 14 additions & 11 deletions
````diff
@@ -1,6 +1,9 @@
 __precompile__()
 
-module VML
+module IntelVectorMath
+
+export IVM
+const IVM = IntelVectorMath
 
 # import Base: .^, ./
 using SpecialFunctions
````
````diff
@@ -88,7 +91,7 @@ for t in (Float32, Float64)
     def_unary_op(Complex{t}, t, :abs, :abs!, :Abs)
     def_unary_op(Complex{t}, t, :angle, :angle!, :Arg)
 
-    ### cis is special, vml function is based on output
+    ### cis is special, IntelVectorMath function is based on output
     def_unary_op(t, Complex{t}, :cis, :cis!, :CIS; vmltype = Complex{t})
 
     # Binary, complex-only. These are more accurate but performance is
````
````diff
@@ -104,37 +107,37 @@ end
 """
     @overload exp log sin
 
-This macro adds a method to each function in `Base` (or perhaps in `SpecialFunctions`),
-so that when acting on an array (or two arrays) it calls the `VML` function of the same name.
+This macro adds a method to each function in `Base` (or perhaps in `SpecialFunctions`),
+so that when acting on an array (or two arrays) it calls the `IntelVectorMath` function of the same name.
 
 The existing action on scalars is unaffected. However, `exp(M::Matrix)` will now mean
-element-wise `VML.exp(M) == exp.(M)`, rather than matrix exponentiation.
+element-wise `IntelVectorMath.exp(M) == exp.(M)`, rather than matrix exponentiation.
 """
 macro overload(funs...)
     out = quote end
     say = []
     for f in funs
         if f in _UNARY
             if isdefined(Base, f)
-                push!(out.args, :( Base.$f(A::Array) = VML.$f(A) ))
+                push!(out.args, :( Base.$f(A::Array) = IntelVectorMath.$f(A) ))
                 push!(say, "Base.$f(A)")
             elseif isdefined(SpecialFunctions, f)
-                push!(out.args, :( VML.SpecialFunctions.$f(A::Array) = VML.$f(A) ))
+                push!(out.args, :( IntelVectorMath.SpecialFunctions.$f(A::Array) = IntelVectorMath.$f(A) ))
                 push!(say, "SpecialFunctions.$f(A)")
             else
-                @error "function VML.$f is not defined in Base or SpecialFunctions, so there is nothing to overload"
+                @error "function IntelVectorMath.$f is not defined in Base or SpecialFunctions, so there is nothing to overload"
             end
         end
         if f in _BINARY
             if isdefined(Base, f)
-                push!(out.args, :( Base.$f(A::Array, B::Array) = VML.$f(A, B) ))
+                push!(out.args, :( Base.$f(A::Array, B::Array) = IntelVectorMath.$f(A, B) ))
                 push!(say, "Base.$f(A, B)")
             else
-                @error "function VML.$f is not defined in Base, so there is nothing to overload"
+                @error "function IntelVectorMath.$f is not defined in Base, so there is nothing to overload"
             end
         end
         if !(f in _UNARY) && !(f in _BINARY)
-            error("there is no function $f defined by VML.jl")
+            error("there is no function $f defined by IntelVectorMath.jl")
         end
     end
     str = string("Overloaded these functions: \n  ", join(say, " \n  "))
````
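
A usage sketch for the macro defined above, following its docstring and the `@overload macro` testset at the end of this diff:

```julia
using IntelVectorMath

# After the macro runs, exp/sin on Arrays dispatch to IntelVectorMath;
# scalar calls are untouched. The macro returns a summary String.
@overload exp sin

A = ones(2, 2)
exp(A) == exp.(A)   # true: element-wise exp, no longer matrix exponentiation
```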

src/setup.jl

Lines changed: 2 additions & 2 deletions
````diff
@@ -46,9 +46,9 @@ function vml_check_error()
         # Singularity, overflow, or underflow
         # I don't think Base throws on these
         elseif vml_error == 1000
-            warn("VML does not support $(vml_get_accuracy); lower accuracy used instead")
+            warn("IntelVectorMath does not support $(vml_get_accuracy); lower accuracy used instead")
         else
-            error("an unexpected error occurred in VML ($vml_error)")
+            error("an unexpected error occurred in IntelVectorMath ($vml_error)")
         end
     end
 end
````
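
The practical effect of this mapping, as exercised in the test suite below: MKL status codes surface as ordinary Julia exceptions rather than silent NaNs.

```julia
using IntelVectorMath, Test

# vml_check_error translates MKL's status flag into a Julia exception,
# so an out-of-domain input throws just like the scalar Base function:
@test_throws DomainError IVM.sqrt([-1.0])
```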

test/complex.jl

Lines changed: 3 additions & 3 deletions
````diff
@@ -18,11 +18,11 @@ fns = [x[1:2] for x in base_unary_complex]
 for t in (ComplexF32, ComplexF64), i = 1:length(fns)
 
     base_fn = eval(:($(fns[i][1]).$(fns[i][2])))
-    vml_fn = eval(:(VML.$(fns[i][2])))
+    vml_fn = eval(:(IntelVectorMath.$(fns[i][2])))
 
-    Test.@test which(vml_fn, typeof(input[t][i])).module == VML
+    Test.@test which(vml_fn, typeof(input[t][i])).module == IntelVectorMath
 
-    # Test.test_approx_eq(output[t][i], fn(input[t][i]...), "Base $t $fn", "VML $t $fn")
+    # Test.test_approx_eq(output[t][i], fn(input[t][i]...), "Base $t $fn", "IntelVectorMath $t $fn")
     Test.@test vml_fn(input[t][i]...) ≈ base_fn.(input[t][i]...)
 
 end
````
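
The `which(...).module` check above is plain Julia reflection; a standalone sketch of the same assertion (argument types chosen for illustration):

```julia
using IntelVectorMath

# `which` returns the Method that would be invoked for these argument types;
# its `module` field confirms the package's method handles Vector input.
m = which(IntelVectorMath.sin, Tuple{Vector{Float64}})
m.module == IntelVectorMath   # true
```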

test/real.jl

Lines changed: 7 additions & 7 deletions
````diff
@@ -19,12 +19,12 @@ fns = [[x[1:2] for x in base_unary_real]; [x[1:2] for x in base_binary_real]]
 for t in (Float32, Float64), i = 1:length(fns)
 
     base_fn = eval(:($(fns[i][1]).$(fns[i][2])))
-    vml_fn = eval(:(VML.$(fns[i][2])))
-    # vml_fn! = eval(:(VML.$(fns[i][2])!))
+    vml_fn = eval(:(IntelVectorMath.$(fns[i][2])))
+    # vml_fn! = eval(:(IntelVectorMath.$(fns[i][2])!))
 
-    Test.@test which(vml_fn, typeof(input[t][i])).module == VML
+    Test.@test which(vml_fn, typeof(input[t][i])).module == IntelVectorMath
 
-    # Test.test_approx_eq(output[t][i], fn(input[t][i]...), "Base $t $fn", "VML $t $fn")
+    # Test.test_approx_eq(output[t][i], fn(input[t][i]...), "Base $t $fn", "IntelVectorMath $t $fn")
     Test.@test vml_fn(input[t][i]...) ≈ base_fn.(input[t][i]...)
 
 end
````
````diff
@@ -34,8 +34,8 @@
 @testset "Error Handling and Settings" begin
 
     # Verify that we still throw DomainErrors
-    Test.@test_throws DomainError VML.sqrt([-1.0])
-    Test.@test_throws DomainError VML.log([-1.0])
+    Test.@test_throws DomainError IntelVectorMath.sqrt([-1.0])
+    Test.@test_throws DomainError IntelVectorMath.log([-1.0])
 
     # Setting accuracy
     vml_set_accuracy(VML_LA)
````
````diff
@@ -48,7 +48,7 @@ end
 
 @testset "@overload macro" begin
 
-    @test VML.exp([1.0]) ≈ exp.([1.0])
+    @test IntelVectorMath.exp([1.0]) ≈ exp.([1.0])
     @test_throws MethodError Base.exp([1.0])
     @test (@overload log exp) isa String
     @test Base.exp([1.0]) ≈ exp.([1.0])
````

test/runtests.jl

Lines changed: 1 addition & 1 deletion
````diff
@@ -1,5 +1,5 @@
 using Test
-using VML
+using IntelVectorMath
 
 include("common.jl")
 include("real.jl")
````
