Bump tensorflow requirement to <=2.10 #59

Open · wants to merge 5 commits into master
Changes from all commits
69 changes: 69 additions & 0 deletions .github/workflows/run_tests.yml
@@ -0,0 +1,69 @@
name: Run Tests

on:
  push:
    tags:
      - '*'
    branches:
      - '**'
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]

jobs:
  cancel_previous_runs:
    runs-on: ubuntu-latest
    steps:
      - name: Cancel Previous Runs
        uses: styfle/[email protected]
        with:
          access_token: ${{ github.token }}

  run_tests:
    needs: cancel_previous_runs
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        config:
          - {python: "3.7", tensorflow: "2.0"}
          - {python: "3.7", tensorflow: "2.1"}
          - {python: "3.8", tensorflow: "2.2"}
          - {python: "3.8", tensorflow: "2.3"}
          - {python: "3.8", tensorflow: "2.4"}
          - {python: "3.9", tensorflow: "2.5"}
          - {python: "3.9", tensorflow: "2.7"}
          - {python: "3.9", tensorflow: "2.8"}
          - {python: "3.9", tensorflow: "2.9"}
          - {python: "3.10", tensorflow: "2.10"}

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Set up environment
        run: |
          echo "LINUX_VERSION=$(uname -rs)" >> $GITHUB_ENV

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.config.python }}

      - name: Cache Python packages
        uses: actions/cache@v3
        with:
          path: ${{ env.pythonLocation }}
          key: ${{env.LINUX_VERSION}}-pip-${{ matrix.config.python }}-${{ hashFiles('setup.py') }}
          restore-keys: ${{env.LINUX_VERSION}}-pip-${{ matrix.config.python }}-

      - name: Install package & dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -U wheel setuptools
          pip install -U .[test] tensorflow==${{ matrix.config.tensorflow }}
          python -c "import dca"

      - name: Run tests
        run: pytest -vv
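For contributors who want to reproduce a matrix entry locally, the install and smoke-test steps above map onto a short script. This is a minimal sketch rather than part of the PR; the chosen TensorFlow version is just one entry from the matrix:

```python
# Local equivalent of the CI "Install package & dependencies" and "Run tests" steps.
# A sketch only; run from the repository root with the matching Python version active.
import subprocess
import sys

tf_version = "2.10"  # any tensorflow entry from the matrix above

subprocess.check_call([sys.executable, "-m", "pip", "install", "-U", "wheel", "setuptools"])
subprocess.check_call([sys.executable, "-m", "pip", "install", "-U", ".[test]",
                       f"tensorflow=={tf_version}"])
subprocess.check_call([sys.executable, "-c", "import dca"])     # import smoke test
subprocess.check_call([sys.executable, "-m", "pytest", "-vv"])  # full test suite
```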
4 changes: 3 additions & 1 deletion .gitignore
@@ -5,4 +5,6 @@ build
*.egg-info
.Rproj.user
docs/build
data/simulation/
data/
.idea

2 changes: 0 additions & 2 deletions dca/__init__.py
@@ -1,2 +0,0 @@
import os
os.environ['KERAS_BACKEND'] = 'tensorflow'
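Since the package now imports tensorflow.keras directly (see the changes below), setting KERAS_BACKEND here is no longer needed. A minimal sanity check, as a sketch rather than part of the diff:

```python
import tensorflow as tf

# tf.keras is always backed by TensorFlow, so no KERAS_BACKEND selection is required.
print(tf.keras.backend.backend())  # prints "tensorflow"
```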
2 changes: 1 addition & 1 deletion dca/__main__.py
@@ -13,7 +13,7 @@
# limitations under the License.
# ==============================================================================

import os, sys, argparse
import argparse

def parse_args():
parser = argparse.ArgumentParser(description='Autoencoder')
2 changes: 1 addition & 1 deletion dca/api.py
@@ -1,4 +1,4 @@
import os, tempfile, shutil, random
import os, random
import anndata
import numpy as np
import scanpy as sc
2 changes: 1 addition & 1 deletion dca/hyper.py
@@ -5,7 +5,7 @@
import numpy as np
from kopt import CompileFN, test_fn
from hyperopt import fmin, tpe, hp, Trials
import keras.optimizers as opt
import tensorflow.keras.optimizers as opt

from . import io
from .network import AE_types
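The hyperparameter-search code keeps the same attribute-style optimizer access after the import swap. A hedged sketch; the optimizer class and learning rate are illustrative, not taken from the diff:

```python
import tensorflow.keras.optimizers as opt

# Same usage pattern as with standalone Keras, now resolved through tf.keras.
optimizer = opt.RMSprop(learning_rate=1e-3)
```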
3 changes: 1 addition & 2 deletions dca/io.py
@@ -18,14 +18,13 @@
from __future__ import division
from __future__ import print_function

import pickle, os, numbers
import pickle

import numpy as np
import scipy as sp
import pandas as pd
import scanpy as sc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale


#TODO: Fix this
21 changes: 15 additions & 6 deletions dca/layers.py
@@ -1,7 +1,6 @@
from keras.engine.topology import Layer
from keras.layers import Lambda, Dense
from keras.engine.base_layer import InputSpec
from keras import backend as K
from tensorflow.keras.layers import Layer, Lambda, Dense
from tensorflow.keras.layers import InputSpec
from tensorflow.keras import backend as K
import tensorflow as tf


@@ -81,5 +80,15 @@ def call(self, inputs):
return output


nan2zeroLayer = Lambda(lambda x: tf.where(tf.is_nan(x), tf.zeros_like(x), x))
ColwiseMultLayer = Lambda(lambda l: l[0]*tf.reshape(l[1], (-1,1)))
class ColwiseMultLayer(Layer):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def build(self, input_shape):
        super().build(input_shape)

    def call(self, l):
        return l[0]*tf.reshape(l[1], (-1,1))

    def compute_output_shape(self, input_shape):
        return input_shape[0]
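Because ColwiseMultLayer is now a Layer subclass rather than a Lambda instance, call sites instantiate it before applying it to the tensor list, as the network.py changes below show. A minimal usage sketch; the tensor shapes and names are illustrative:

```python
from tensorflow.keras.layers import Input
from dca.layers import ColwiseMultLayer  # the subclass defined above

mean = Input(shape=(2000,), name="mean")               # illustrative shape
size_factors = Input(shape=(1,), name="size_factors")

# Old Lambda form: output = ColwiseMultLayer([mean, size_factors])
# New Layer subclass: instantiate first, then call on the input list.
output = ColwiseMultLayer()([mean, size_factors])
```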
1 change: 0 additions & 1 deletion dca/loss.py
@@ -1,6 +1,5 @@
import numpy as np
import tensorflow as tf
from keras import backend as K


def _nan2zero(x):
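With `from keras import backend as K` gone, loss.py relies on plain TensorFlow ops. For context, a TF2-style nan-to-zero helper consistent with these imports could look like the sketch below; the actual body of _nan2zero is not shown in this diff:

```python
import tensorflow as tf

def nan2zero(x):
    # Replace NaN entries with zeros; tf.math.is_nan is the TF2 spelling of the older tf.is_nan.
    return tf.where(tf.math.is_nan(x), tf.zeros_like(x), x)
```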
42 changes: 19 additions & 23 deletions dca/network.py
@@ -15,18 +15,14 @@

import os
import pickle
from abc import ABCMeta, abstractmethod

import numpy as np
import scanpy as sc

import keras
from keras.layers import Input, Dense, Dropout, Activation, BatchNormalization, Lambda
from keras.models import Model
from keras.regularizers import l1_l2
from keras.objectives import mean_squared_error
from keras.initializers import Constant
from keras import backend as K
from tensorflow.keras.layers import Input, Dense, Dropout, Activation, BatchNormalization, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l1_l2
from tensorflow.keras.losses import mean_squared_error
from tensorflow.keras import backend as K

import tensorflow as tf

@@ -130,7 +126,7 @@ def build(self):
# Use separate act. layers to give user the option to get pre-activations
# of layers when requested
if self.activation in advanced_activations:
last_hidden = keras.layers.__dict__[self.activation](name='%s_act'%layer_name)(last_hidden)
last_hidden = tf.keras.layers.__dict__[self.activation](name='%s_act'%layer_name)(last_hidden)
else:
last_hidden = Activation(self.activation, name='%s_act'%layer_name)(last_hidden)

@@ -146,7 +142,7 @@ def build_output(self):
mean = Dense(self.output_size, kernel_initializer=self.init,
kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
name='mean')(self.decoder_output)
output = ColwiseMultLayer([mean, self.sf_layer])
output = ColwiseMultLayer()([mean, self.sf_layer])

# keep unscaled output as an extra model
self.extra_models['mean_norm'] = Model(inputs=self.input_layer, outputs=mean)
@@ -236,7 +232,7 @@ def build_output(self):
mean = Dense(self.output_size, activation=MeanAct, kernel_initializer=self.init,
kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
name='mean')(self.decoder_output)
output = ColwiseMultLayer([mean, self.sf_layer])
output = ColwiseMultLayer()([mean, self.sf_layer])
self.loss = poisson_loss

self.extra_models['mean_norm'] = Model(inputs=self.input_layer, outputs=mean)
@@ -257,7 +253,7 @@ def build_output(self):
disp = ConstantDispersionLayer(name='dispersion')
mean = disp(mean)

output = ColwiseMultLayer([mean, self.sf_layer])
output = ColwiseMultLayer()([mean, self.sf_layer])

nb = NB(disp.theta_exp)
self.loss = nb.loss
@@ -302,7 +298,7 @@ def build_output(self):
mean = Dense(self.output_size, activation=MeanAct, kernel_initializer=self.init,
kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
name='mean')(self.decoder_output)
output = ColwiseMultLayer([mean, self.sf_layer])
output = ColwiseMultLayer()([mean, self.sf_layer])
output = SliceLayer(0, name='slice')([output, disp])

nb = NB(theta=disp, debug=self.debug)
@@ -350,7 +346,7 @@ def build_output(self):
mean = Dense(self.output_size, activation=MeanAct, kernel_initializer=self.init,
kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
name='mean')(self.decoder_output)
output = ColwiseMultLayer([mean, self.sf_layer])
output = ColwiseMultLayer()([mean, self.sf_layer])
output = SliceLayer(0, name='slice')([output, disp])

nb = NB(theta=disp, debug=self.debug)
@@ -378,7 +374,7 @@ def build_output(self):
mean = Dense(self.output_size, activation=MeanAct, kernel_initializer=self.init,
kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
name='mean')(self.decoder_output)
output = ColwiseMultLayer([mean, self.sf_layer])
output = ColwiseMultLayer()([mean, self.sf_layer])
output = SliceLayer(0, name='slice')([output, disp, pi])

zinb = ZINB(pi, theta=disp, ridge_lambda=self.ridge, debug=self.debug)
@@ -446,7 +442,7 @@ def build_output(self):

mean = Activation(MeanAct, name='mean')(mean_no_act)

output = ColwiseMultLayer([mean, self.sf_layer])
output = ColwiseMultLayer()([mean, self.sf_layer])
output = SliceLayer(0, name='slice')([output, disp, pi])

zinb = ZINB(pi, theta=disp, ridge_lambda=self.ridge, debug=self.debug)
@@ -478,7 +474,7 @@ def build_output(self):
mean = Dense(self.output_size, activation=MeanAct, kernel_initializer=self.init,
kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
name='mean')(self.decoder_output)
output = ColwiseMultLayer([mean, self.sf_layer])
output = ColwiseMultLayer()([mean, self.sf_layer])
output = SliceLayer(0, name='slice')([output, disp, pi])

zinb = ZINB(pi, theta=disp, ridge_lambda=self.ridge, debug=self.debug)
@@ -508,7 +504,7 @@ def build_output(self):
disp = ConstantDispersionLayer(name='dispersion')
mean = disp(mean)

output = ColwiseMultLayer([mean, self.sf_layer])
output = ColwiseMultLayer()([mean, self.sf_layer])

zinb = ZINB(pi, theta=disp.theta_exp, ridge_lambda=self.ridge, debug=self.debug)
self.loss = zinb.loss
@@ -622,7 +618,7 @@ def build(self):
# Use separate act. layers to give user the option to get pre-activations
# of layers when requested
if self.activation in advanced_activations:
last_hidden = keras.layers.__dict__[self.activation](name='%s_act'%layer_name)(last_hidden)
last_hidden = tf.keras.layers.__dict__[self.activation](name='%s_act'%layer_name)(last_hidden)
else:
last_hidden = Activation(self.activation, name='%s_act'%layer_name)(last_hidden)

@@ -646,7 +642,7 @@ def build_output(self):
kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
name='mean')(self.last_hidden_mean)

output = ColwiseMultLayer([mean, self.sf_layer])
output = ColwiseMultLayer()([mean, self.sf_layer])
output = SliceLayer(0, name='slice')([output, disp, pi])

zinb = ZINB(pi, theta=disp, ridge_lambda=self.ridge, debug=self.debug)
@@ -726,7 +722,7 @@ def build(self):
# Use separate act. layers to give user the option to get pre-activations
# of layers when requested
if self.activation in advanced_activations:
last_hidden = keras.layers.__dict__[self.activation](name='%s_act'%layer_name)(last_hidden)
last_hidden = tf.keras.layers.__dict__[self.activation](name='%s_act'%layer_name)(last_hidden)
else:
last_hidden = Activation(self.activation, name='%s_act'%layer_name)(last_hidden)

@@ -747,7 +743,7 @@ def build_output(self):
kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
name='mean')(self.last_hidden_mean)

output = ColwiseMultLayer([mean, self.sf_layer])
output = ColwiseMultLayer()([mean, self.sf_layer])
output = SliceLayer(0, name='slice')([output, disp])

nb = NB(theta=disp, debug=self.debug)
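The build() hunks above now resolve advanced activations through tf.keras.layers instead of the standalone keras package. A hedged sketch of that lookup; "LeakyReLU" is only an example, since the advanced_activations list itself is not part of this diff:

```python
import tensorflow as tf

activation = "LeakyReLU"  # illustrative; assumed to be listed in advanced_activations
act_layer = tf.keras.layers.__dict__[activation](name="hidden_act")

x = act_layer(tf.constant([[-1.0, 2.0]]))  # applies the activation elementwise
print(x.numpy())
```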