
Commit

Merge pull request #25 from TimDettmers/remove_unused_code
Remove unused code, switch to warnings
TimDettmers authored Sep 5, 2022
2 parents eab4d82 + aca5588 commit f0ae860
Showing 7 changed files with 4 additions and 133 deletions.
7 changes: 3 additions & 4 deletions bitsandbytes/__main__.py
@@ -3,8 +3,9 @@
# cli()
import os
import sys
import torch
from warnings import warn

import torch

HEADER_WIDTH = 60

@@ -32,8 +33,6 @@ def print_debug_info() -> None:
from . import COMPILED_WITH_CUDA, PACKAGE_GITHUB_URL
from .cuda_setup.main import get_compute_capabilities, get_cuda_lib_handle
from .cuda_setup.env_vars import to_be_ignored
from .utils import print_stderr


print_header("POTENTIALLY LIBRARY-PATH-LIKE ENV VARS")
for k, v in os.environ.items():
@@ -84,7 +83,7 @@ def print_debug_info() -> None:

except ImportError:
print()
print_stderr(
warn(
f"WARNING: {__package__} is currently running as CPU-only!\n"
"Therefore, 8-bit optimizers and GPU quantization are unavailable.\n\n"
f"If you think that this is so erroneously,\nplease report an issue!"
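Note: the substantive change in this file is the switch from the project's own print_stderr helper to the standard-library warn. A minimal, self-contained sketch of the before/after pattern, with the message abridged from the hunk above (not the verbatim upstream code):

import sys
from warnings import warn

def print_stderr(s: str) -> None:
    # the helper removed by this commit (see bitsandbytes/utils.py below)
    print(s, file=sys.stderr)

# before this commit:
print_stderr("WARNING: bitsandbytes is currently running as CPU-only!")
# after this commit:
warn("WARNING: bitsandbytes is currently running as CPU-only!")
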
4 changes: 0 additions & 4 deletions bitsandbytes/autograd/_functions.py
@@ -1,6 +1,5 @@
import operator
import torch
import bitsandbytes as bnb
import bitsandbytes.functional as F

from dataclasses import dataclass
@@ -378,9 +377,6 @@ def backward(ctx, grad_output):
return grad_A, grad_B, None, grad_bias, None


matmul = MatMul8bitLt.apply


def matmul(
A: tensor,
B: tensor,
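Note: the deleted matmul = MatMul8bitLt.apply binding was dead code, because the def matmul(...) that immediately follows rebinds the same module-level name. A tiny illustration of that shadowing, using hypothetical stand-ins rather than the real autograd function:

class MatMul8bitLt:
    @staticmethod
    def apply(a, b):
        return a * b  # stand-in for the real 8-bit matmul

matmul = MatMul8bitLt.apply  # first binding ...

def matmul(A, B, out=None):          # ... rebound here, so the assignment above
    return MatMul8bitLt.apply(A, B)  # was never reachable by callers

assert matmul(2, 3) == 6  # always resolves to the def, with or without the assignment
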
79 changes: 0 additions & 79 deletions bitsandbytes/cuda_setup/compute_capability.py

This file was deleted.

4 changes: 0 additions & 4 deletions bitsandbytes/cuda_setup/main.py
@@ -17,10 +17,7 @@
"""

import ctypes
import torch
from pathlib import Path

from ..utils import execute_and_return
from .paths import determine_cuda_runtime_lib_path


@@ -81,7 +78,6 @@ def get_compute_capabilities(cuda):
cc_major = ctypes.c_int()
cc_minor = ctypes.c_int()

result = ctypes.c_int()
device = ctypes.c_int()

check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(nGpus)))
16 changes: 1 addition & 15 deletions bitsandbytes/cuda_setup/paths.py
@@ -2,23 +2,11 @@
from typing import Set, Union
from warnings import warn

from ..utils import print_stderr
from .env_vars import get_potentially_lib_path_containing_env_vars


CUDA_RUNTIME_LIB: str = "libcudart.so"


def purge_unwanted_semicolon(tentative_path: Path) -> Path:
"""
Special function to handle the following exception:
__LMOD_REF_COUNT_PATH=/sw/cuda/11.6.2/bin:2;/mmfs1/home/dettmers/git/sched/bin:1;/mmfs1/home/dettmers/data/anaconda3/bin:1;/mmfs1/home/dettmers/data/anaconda3/condabin:1;/mmfs1/home/dettmers/.local/bin:1;/mmfs1/home/dettmers/bin:1;/usr/local/bin:1;/usr/bin:1;/usr/local/sbin:1;/usr/sbin:1;/mmfs1/home/dettmers/.fzf/bin:1;/mmfs1/home/dettmers/data/local/cuda-11.4/bin:1
"""
# if ';' in str(tentative_path):
# path_as_str, _ = str(tentative_path).split(';')
pass


def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]:
return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path}

@@ -29,7 +17,7 @@ def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
}

if non_existent_directories:
print_stderr(
warn(
"WARNING: The following directories listed in your path were found to "
f"be non-existent: {non_existent_directories}"
)
@@ -117,8 +105,6 @@ def determine_cuda_runtime_lib_path() -> Union[Path, None]:
if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"}
}



cuda_runtime_libs = set()
for env_var, value in remaining_candidate_env_vars.items():
cuda_runtime_libs.update(find_cuda_lib_in(value))
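Note: after this change, bitsandbytes/cuda_setup/paths.py reports missing directories through warn instead of print_stderr, and the dead purge_unwanted_semicolon stub is gone. A rough, runnable reconstruction assembled only from the fragments visible in this hunk (the exact upstream body may differ):

from pathlib import Path
from typing import Set
from warnings import warn

def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]:
    return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path}

def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
    existent = {path for path in candidate_paths if path.exists()}
    non_existent_directories = candidate_paths - existent
    if non_existent_directories:
        warn(
            "WARNING: The following directories listed in your path were found to "
            f"be non-existent: {non_existent_directories}"
        )
    return existent

# e.g. remove_non_existent_dirs(extract_candidate_paths("/usr/lib:/no/such/dir"))
# warns about /no/such/dir and returns {Path('/usr/lib')} on a typical system.
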
18 changes: 0 additions & 18 deletions bitsandbytes/functional.py
@@ -5,7 +5,6 @@
import ctypes as ct
import operator
import random
import math
import torch

from typing import Tuple
@@ -243,23 +242,6 @@ def get_transform_func(dtype, orderA, orderOut, transpose=False):
return getattr(lib, name)


class GlobalData(object):
_instance = None

def __init__(self):
raise RuntimeError("Call get_instance() instead")

def initialize(self):
self.data = {}

@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls._instance.initialize()
return cls._instance


def get_transform_buffer(
shape, dtype, device, to_order, from_order="row", transpose=False
):
9 changes: 0 additions & 9 deletions bitsandbytes/utils.py
@@ -1,6 +1,5 @@
import shlex
import subprocess
import sys
from typing import Tuple


@@ -22,11 +21,3 @@ def execute_and_return_decoded_std_streams(command_string):

std_out, std_err = execute_and_return_decoded_std_streams(command_string)
return std_out, std_err


def print_stderr(s: str) -> None:
print(s, file=sys.stderr)


def warn_of_missing_prerequisite(s: str) -> None:
print_stderr("WARNING, missing pre-requisite: " + s)
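Note: with print_stderr and warn_of_missing_prerequisite gone, their call sites now go through the standard warnings module. The commit message does not state a rationale, but one practical consequence (an observation, not something in the diff) is that downstream code can filter, escalate, or capture these messages, which a plain print to stderr does not allow:

import warnings

# roughly what call sites emit after this commit; the argument text here is
# illustrative, the "missing pre-requisite" prefix comes from the removed helper
warnings.warn("WARNING, missing pre-requisite: CUDA runtime not found")

# consumers can silence it ...
warnings.filterwarnings("ignore", message="WARNING, missing pre-requisite.*")

# ... or capture it programmatically for testing/logging:
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    warnings.warn("WARNING, missing pre-requisite: CUDA runtime not found")
    assert len(caught) == 1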
