
Commit

Fix non-breaking typos
ttytm committed Jun 6, 2024
1 parent eb7c626 commit ad01618
Showing 9 changed files with 20 additions and 22 deletions.
2 changes: 1 addition & 1 deletion autograd/context.v
@@ -13,7 +13,7 @@ pub mut:
// This list can contain duplicates
nodes []&Node[T]
// If no_grad is set to true, operations will not
-// be cached, and backpropogation will not be possible
+// be cached, and backpropagation will not be possible
no_grad bool
}

2 changes: 1 addition & 1 deletion autograd/gate.v
@@ -5,7 +5,7 @@ import vtl
pub interface CacheParam {}

// Gate is an object that can cache the result of an operation,
-// as well as backpropogate a payload backwards along the
+// as well as backpropagate a payload backwards along the
// computational graph
//
// Structs that implement from this interface can add instance
8 changes: 4 additions & 4 deletions autograd/gates_basic.v
@@ -45,8 +45,8 @@ pub fn subtract_gate[T]() &SubstractGate[T] {

pub fn (g &SubstractGate[T]) backward[T](payload &Payload[T]) ![]&vtl.Tensor[T] {
gradient := payload.variable.grad
-oposite := gradient.multiply_scalar[T](vtl.cast[T](-1))!
-return [gradient, oposite]
+opposite := gradient.multiply_scalar[T](vtl.cast[T](-1))!
+return [gradient, opposite]
}
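
For reference, the rule this backward pass encodes: with c = a - b and upstream gradient g (the `gradient` tensor above, the derivative of the loss L with respect to the output c), the chain rule gives

\[
\frac{\partial L}{\partial a} = g \cdot \frac{\partial c}{\partial a} = g,
\qquad
\frac{\partial L}{\partial b} = g \cdot \frac{\partial c}{\partial b} = -g,
\]

so the gate returns the upstream gradient unchanged for the first operand and its negation (`opposite`) for the second.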

pub fn (g &SubstractGate[T]) cache[T](mut result Variable[T], args ...CacheParam) ! {
@@ -134,8 +134,8 @@ pub fn (g &DivideGate[T]) backward[T](payload &Payload[T]) ![]&vtl.Tensor[T] {
gradient := payload.variable.grad
r0 := gradient.divide[T](g.b.value)!
bx2 := g.b.value.multiply_scalar[T](vtl.cast[T](2))!
-oposite := gradient.multiply_scalar[T](vtl.cast[T](-1))!
-mut r1 := oposite.multiply[T](g.a.value)!
+opposite := gradient.multiply_scalar[T](vtl.cast[T](-1))!
+mut r1 := opposite.multiply[T](g.a.value)!
r1 = r1.divide[T](bx2)!
return [r0, r1]
}
4 changes: 2 additions & 2 deletions autograd/variable.v
@@ -5,7 +5,7 @@ import vtl
// Variable is an abstraction of a vtl.Tensor that tracks
// the operations done to the vtl.Tensor. It also keeps
// track of the gradient of the operation if a Variable
-// needs to backpropogate.
+// needs to backpropagate.
// This is the fundamental object used in automatic
// differentiation, as well as the neural network aspects
// of VTL
@@ -64,7 +64,7 @@ pub fn (v &Variable[T]) str() string {
return v.value.str()
}

-// backprop Back propogates an operation along a computational graph.
+// backprop Back propagates an operation along a computational graph.
// This operation will destroy the operational graph, populating
// the gradients for all variables that are predecessors of
// the Variable this is called on.
2 changes: 1 addition & 1 deletion datasets/loader.v
@@ -48,7 +48,7 @@ struct DatasetDownload {
fn download_dataset(data DatasetDownload) !string {
dataset_dir := os.real_path(get_cache_dir('datasets', data.dataset))

-// Handline extensions like `*.tar.gz`.
+// Handle extensions like `*.tar.gz`.
exts := os.file_name(data.file).rsplit_nth('.', 3)
is_tar := exts[0] == 'tar' || (exts.len > 1 && exts[1] == 'tar')

2 changes: 1 addition & 1 deletion examples/vtl_opencl_vcl_support/README.md
@@ -4,7 +4,7 @@ This example shows how to use the OpenCL backend with VCL support.

## Prerequisites

-Read the [docs](https://vlang.github.io/vsl/vcl.html) of the V Computating Language (VCL) and the OpenCL backend.
+Read the [docs](https://vlang.github.io/vsl/vcl.html) of the V Computing Language (VCL) and the OpenCL backend.

## Running the example

6 changes: 2 additions & 4 deletions src/fun.v
@@ -167,15 +167,13 @@ pub fn (t &Tensor[T]) transpose[T](order []int) !&Tensor[T] {
return ret
}

-// t returns a ful transpose of an tensor, with the axes
-// reversed
+// t returns a full transpose of a tensor, with the axes reversed
pub fn (t &Tensor[T]) t[T]() !&Tensor[T] {
order := irange(0, t.rank())
return t.transpose(order.reverse())
}
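
Because `t()` reverses the axis order, a tensor of shape (d_1, ..., d_n) comes back with shape (d_n, ..., d_1); for a rank-2 tensor this is the ordinary matrix transpose,

\[
\bigl(A^{\mathsf{T}}\bigr)_{ij} = A_{ji}.
\]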

-// swapaxes returns a view of an tensor with two axes
-// swapped.
+// swapaxes returns a view of an tensor with two axes swapped
pub fn (t &Tensor[T]) swapaxes[T](a1 int, a2 int) !&Tensor[T] {
mut order := irange(0, t.rank())
tmp := order[a1]
12 changes: 6 additions & 6 deletions stats/stats.v
@@ -79,7 +79,7 @@ pub fn prod_axis_with_dims[T](t &vtl.Tensor[T], data AxisData) T {
return acc
}

-// Measure of Occurance
+// Measure of Occurrence
// Frequency of a given number
// Based on
// https://www.mathsisfun.com/data/frequency-distribution.html
@@ -99,7 +99,7 @@ pub fn freq[T](t &vtl.Tensor[T], val T) int {
return count
}

-// Measure of Central Tendancy
+// Measure of Central Tendency
// Mean of the given input array
// Based on
// https://www.mathsisfun.com/data/central-measures.html
@@ -113,7 +113,7 @@ pub fn mean[T](t &vtl.Tensor[T]) T {
}) / vtl.cast[T](t.size)
}

-// Measure of Central Tendancy
+// Measure of Central Tendency
// Geometric Mean of the given input array
// Based on
// https://www.mathsisfun.com/numbers/geometric-mean.html
@@ -128,7 +128,7 @@ pub fn geometric_mean[T](t &vtl.Tensor[T]) T {
return math.pow(prod, vtl.cast[T](1) / vtl.cast[T](t.size))
}
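
Assuming `prod` above accumulates the product of the elements, the return expression is the textbook geometric mean of the n = `t.size` values:

\[
\mathrm{GM}(x_1, \ldots, x_n) = \Bigl(\prod_{i=1}^{n} x_i\Bigr)^{1/n}.
\]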

-// Measure of Central Tendancy
+// Measure of Central Tendency
// Harmonic Mean of the given input array
// Based on
// https://www.mathsisfun.com/numbers/harmonic-mean.html
@@ -142,7 +142,7 @@ pub fn harmonic_mean[T](t &vtl.Tensor[T]) T {
})
}
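
The definition behind `harmonic_mean` (per the linked page) is the reciprocal of the mean of the reciprocals; only the tail of that accumulation is visible in this hunk:

\[
\mathrm{HM}(x_1, \ldots, x_n) = \frac{n}{\sum_{i=1}^{n} 1/x_i}.
\]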

-// Measure of Central Tendancy
+// Measure of Central Tendency
// Median of the given input array ( input array is assumed to be sorted )
// Based on
// https://www.mathsisfun.com/data/central-measures.html
@@ -158,7 +158,7 @@ pub fn median[T](t &vtl.Tensor[T]) T {
}
}
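
The median doc comment notes the input is assumed sorted; the standard rule for a sorted array of n elements is

\[
\mathrm{median} =
\begin{cases}
x_{(n+1)/2}, & n \text{ odd}, \\
\tfrac{1}{2}\bigl(x_{n/2} + x_{n/2+1}\bigr), & n \text{ even}.
\end{cases}
\]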

-// Measure of Central Tendancy
+// Measure of Central Tendency
// Mode of the given input array
// Based on
// https://www.mathsisfun.com/data/central-measures.html
4 changes: 2 additions & 2 deletions storage/cpu.v
@@ -25,13 +25,13 @@ pub fn from_array[T](arr []T) &CpuStorage[T] {
}
}

-// Private function. Used to implement Storage operator
+// Private function. Used to implement the Storage operator
@[inline]
pub fn (s &CpuStorage[T]) get[T](i int) T {
return s.data[i]
}

-// Private function. Used to implement assigment to the Storage element
+// Private function. Used to implement assignments to the Storage element
@[inline]
pub fn (mut s CpuStorage[T]) set[T](i int, val T) {
s.data[i] = val