Adapted perforation strategy to allow for layering + bug fixes (#296)
* src/flownet/data/perforation_strategy.py

* LAYER_ID now recognized in the bottom_point, top_point, multiple, and multiple_based_on_workovers perforation strategies

* Loop only over the wells present in the current layer

* Fixed the multiple function in perforation_strategy.py to work correctly and added test data to a CSV file

* Bugfix in the bounding box of foreign connections

* Updated unit test to also test synthetic well K, which penetrates both layers

* Added changelog item

Co-authored-by: Wouter J. de Bruin <[email protected]>
LonnekevB and wouterjdb authored Jan 15, 2021
1 parent 5546204 commit 2e44905
Showing 4 changed files with 117 additions and 293 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -5,6 +5,7 @@ This project adheres to [Semantic Versioning](https://semver.org/).
 ## Unreleased
 
 ### Added
+- [#296](https://github.com/equinor/flownet/pull/296) Adapted perforation strategy to allow for layering + bug fixes in the 'multiple' and 'multiple_based_on_workovers' perforation strategies.
 - [#284](https://github.com/equinor/flownet/pull/284) Added the option to specify cumulative phase rates as observations (WOPT, WWPT, WGPT, WGIT, WWIT)
 
 ### Fixes
80 changes: 42 additions & 38 deletions src/flownet/data/perforation_strategy.py
@@ -18,7 +18,9 @@ def bottom_point(df: pd.DataFrame) -> pd.DataFrame:
     """
     df_multiple = multiple(df)
     df_multiple_ever_true = (
-        df_multiple.groupby(["X", "Y", "Z", "WELL_NAME"]).sum().reset_index()
+        df_multiple.groupby(["X", "Y", "Z", "WELL_NAME", "LAYER_ID"])
+        .sum()
+        .reset_index()
     )
     df_first_dates = (
         df[["WELL_NAME", "DATE"]]
@@ -50,7 +52,9 @@ def top_point(df: pd.DataFrame) -> pd.DataFrame:
"""
df_multiple = multiple(df)
df_multiple_ever_true = (
df_multiple.groupby(["X", "Y", "Z", "WELL_NAME"]).sum().reset_index()
df_multiple.groupby(["X", "Y", "Z", "WELL_NAME", "LAYER_ID"])
.sum()
.reset_index()
)
df_first_dates = (
df[["WELL_NAME", "DATE"]]
Expand Down Expand Up @@ -84,19 +88,12 @@ def multiple(df: pd.DataFrame) -> pd.DataFrame:
DataFrame with all connections
"""
df = df[["WELL_NAME", "X", "Y", "Z", "DATE", "OPEN"]].sort_values(
["WELL_NAME", "X", "Y", "Z", "DATE"]
df = df[["WELL_NAME", "X", "Y", "Z", "DATE", "OPEN", "LAYER_ID"]].sort_values(
["WELL_NAME", "X", "Y", "Z", "DATE", "LAYER_ID"]
)
df["SHIFT"] = df.groupby(["WELL_NAME", "X", "Y", "Z", "LAYER_ID"])["OPEN"].shift(1)

return df[
(df["OPEN"] != df["OPEN"].shift(1))
| (
(df["X"] != df["X"].shift(1))
& (df["Y"] != df["Y"].shift(1))
& (df["Z"] != df["Z"].shift(1))
)
| (df["WELL_NAME"] != df["WELL_NAME"].shift(1))
]
return df[(df["OPEN"] != df["SHIFT"])].drop("SHIFT", axis=1)


# pylint: disable=too-many-locals
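The rewritten multiple compares each connection's OPEN flag against its previous value within the same (WELL_NAME, X, Y, Z, LAYER_ID) group, so only the first record of a connection (shifted value is NaN) and genuine open/shut transitions survive. A minimal sketch with toy data (values assumed for illustration):

```python
import pandas as pd

# Toy history for a single connection (assumed data, not from the repository).
df = pd.DataFrame(
    {
        "WELL_NAME": ["A"] * 4,
        "X": [1.0] * 4,
        "Y": [1.0] * 4,
        "Z": [1.0] * 4,
        "LAYER_ID": [0] * 4,
        "DATE": pd.to_datetime(
            ["2021-01-10", "2021-01-11", "2021-01-12", "2021-01-13"]
        ),
        "OPEN": [True, True, False, False],
    }
)
# Shift OPEN within each connection; the first record per connection gets NaN.
df["SHIFT"] = df.groupby(["WELL_NAME", "X", "Y", "Z", "LAYER_ID"])["OPEN"].shift(1)
# Keep rows where the state changed: 2021-01-10 (NaN != True) and 2021-01-12
# (True -> False). Unchanged repeats are dropped.
print(df[df["OPEN"] != df["SHIFT"]].drop("SHIFT", axis=1))
```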
@@ -105,7 +102,7 @@ def multiple_based_on_workovers(df: pd.DataFrame) -> pd.DataFrame:
     This strategy creates multiple connections per well when the well during the historic production period has been
     straddled or plugged (i.e., individual connections have been shut).
 
-    The following steps are performed:
+    The following steps are performed per layer:
 
     1. Split connections into groups of connections per well, based on their open/closing history. That is,
        connections that have seen opening or closure at the same moment in time are considered a group. This is
@@ -128,34 +125,37 @@ def multiple_based_on_workovers(df: pd.DataFrame) -> pd.DataFrame:
     df = multiple(df)
 
     df_w_groups = pd.DataFrame(
-        [], columns=["WELL_NAME", "X", "Y", "Z", "DATE", "OPEN", "GROUPID"]
+        [], columns=["WELL_NAME", "X", "Y", "Z", "DATE", "OPEN", "GROUPID", "LAYER_ID"]
     )
     df_groups = pd.DataFrame([], columns=["X", "Y", "Z", "GROUPID"])
     groupid = 0
 
     # Step 1
-    for well_name in df["WELL_NAME"].unique():
-        df_well = df.loc[df["WELL_NAME"] == well_name][
-            ["X", "Y", "Z", "WELL_NAME", "OPEN", "DATE"]
-        ]
-        df_well_piv = df_well.pivot_table("OPEN", ["X", "Y", "Z", "WELL_NAME"], "DATE")
-        df_well_piv.fillna(method="ffill", axis=1, inplace=True)
-        df_well_piv.fillna(False, inplace=True)
-        df_well_piv = df_well_piv.apply(lambda x: hash(tuple(x)), axis=1)
-
-        for group in df_well_piv.unique():
-            df_group = (
-                df_well_piv.loc[df_well_piv == group]
-                .index.to_frame()
-                .reset_index(drop=True)[["X", "Y", "Z"]]
-            )
-            df_group["GROUPID"] = groupid
-            groupid += 1
-            df_groups = df_groups.append(df_group)
-
-        df_w_groups = df_w_groups.append(
-            df_well.merge(df_groups, how="left", on=["X", "Y", "Z"])
-        )
+    for layer in df["LAYER_ID"].unique():
+        for well_name in df[df["LAYER_ID"] == layer]["WELL_NAME"].unique():
+            df_well = df.loc[
+                (df["WELL_NAME"] == well_name) & (df["LAYER_ID"] == layer)
+            ][["X", "Y", "Z", "WELL_NAME", "LAYER_ID", "OPEN", "DATE"]]
+            df_well_piv = df_well.pivot_table(
+                "OPEN", ["X", "Y", "Z", "WELL_NAME", "LAYER_ID"], "DATE"
+            )
+            df_well_piv.fillna(method="ffill", axis=1, inplace=True)
+            df_well_piv.fillna(False, inplace=True)
+            df_well_piv = df_well_piv.apply(lambda x: hash(tuple(x)), axis=1)
+
+            for group in df_well_piv.unique():
+                df_group = (
+                    df_well_piv.loc[df_well_piv == group]
+                    .index.to_frame()
+                    .reset_index(drop=True)[["X", "Y", "Z"]]
+                )
+                df_group["GROUPID"] = groupid
+                groupid += 1
+                df_groups = df_groups.append(df_group)
+
+            df_w_groups = df_w_groups.append(
+                df_well.merge(df_groups, how="left", on=["X", "Y", "Z"])
+            )
 
     # Step 2
     for groupid in df_w_groups["GROUPID"].unique():
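Step 1 hinges on hashing each connection's forward-filled open/close history: connections of a well whose state vectors agree on every date hash to the same value and therefore land in the same GROUPID. A toy sketch of the idiom (data assumed for illustration):

```python
import pandas as pd

# Three connections of well A in one layer; two share the same history.
df_well = pd.DataFrame(
    {
        "X": [1.0, 2.0, 3.0],
        "Y": [1.0, 2.0, 3.0],
        "Z": [1.0, 2.0, 3.0],
        "WELL_NAME": ["A"] * 3,
        "LAYER_ID": [0] * 3,
        "DATE": pd.to_datetime(["2021-01-12"] * 3),
        "OPEN": [True, True, False],
    }
)
# One row per connection, one column per date, holding the open/shut state.
piv = df_well.pivot_table("OPEN", ["X", "Y", "Z", "WELL_NAME", "LAYER_ID"], "DATE")
piv = piv.ffill(axis=1).fillna(False)  # propagate last known state
# Hash the full state vector of each connection; equal histories, equal hash.
history_hash = piv.apply(lambda row: hash(tuple(row)), axis=1)
print(history_hash.nunique())  # 2 groups: {open, open} and {shut}
```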
@@ -167,8 +167,8 @@ def multiple_based_on_workovers(df: pd.DataFrame) -> pd.DataFrame:
         df_foreign = df_w_groups.loc[
             (
                 ((df_w_groups["X"] > xmin) & (df_w_groups["X"] < xmax))
-                | ((df_w_groups["Y"] > ymin) & (df_w_groups["Y"] < ymax))
-                | ((df_w_groups["Z"] > zmin) & (df_w_groups["Z"] < zmax))
+                & ((df_w_groups["Y"] > ymin) & (df_w_groups["Y"] < ymax))
+                & ((df_w_groups["Z"] > zmin) & (df_w_groups["Z"] < zmax))
             )
             & (df_w_groups["GROUPID"] != groupid)
         ]
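This hunk is the bounding-box bugfix from the commit message: with `|`, a foreign connection counted as inside the group's bounding box when merely one coordinate fell in range; 3D containment requires all three ranges at once, hence `&`. A toy illustration (points assumed):

```python
import pandas as pd

# Two points against a 0..10 box: the first is truly inside, the second only
# matches on X.
pts = pd.DataFrame({"X": [5.0, 5.0], "Y": [5.0, 50.0], "Z": [5.0, 50.0]})
xmin = ymin = zmin = 0.0
xmax = ymax = zmax = 10.0

inside_x = (pts["X"] > xmin) & (pts["X"] < xmax)
inside_y = (pts["Y"] > ymin) & (pts["Y"] < ymax)
inside_z = (pts["Z"] > zmin) & (pts["Z"] < zmax)

print((inside_x | inside_y | inside_z).tolist())  # buggy: [True, True]
print((inside_x & inside_y & inside_z).tolist())  # fixed: [True, False]
```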
@@ -203,7 +203,11 @@ def multiple_based_on_workovers(df: pd.DataFrame) -> pd.DataFrame:
     )
 
     df_w_groups["OPEN"] = df_w_groups["OPEN"].astype(int)
-    result = df_w_groups.groupby(["WELL_NAME", "DATE", "GROUPID"]).mean().reset_index()
+    result = (
+        df_w_groups.groupby(["WELL_NAME", "DATE", "GROUPID", "LAYER_ID"])
+        .mean()
+        .reset_index()
+    )
     result["OPEN"] = result["OPEN"].astype(bool)
     result.drop("GROUPID", axis=1, inplace=True)
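The final aggregation now also groups on LAYER_ID, so groups from different layers are never merged. Note the bool -> int -> mean -> bool round trip: averaging the integer OPEN flags per group and casting back to bool marks a group open as soon as any of its connections is open. A small sketch (values assumed):

```python
import pandas as pd

# One group of three connections at one date; a single one is open.
df_w_groups = pd.DataFrame(
    {
        "WELL_NAME": ["A"] * 3,
        "DATE": pd.to_datetime(["2021-01-12"] * 3),
        "GROUPID": [0] * 3,
        "LAYER_ID": [0] * 3,
        "OPEN": [True, False, False],
    }
)
df_w_groups["OPEN"] = df_w_groups["OPEN"].astype(int)
result = (
    df_w_groups.groupby(["WELL_NAME", "DATE", "GROUPID", "LAYER_ID"])
    .mean()
    .reset_index()
)
result["OPEN"] = result["OPEN"].astype(bool)  # mean 1/3 -> True: group is open
print(result)
```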
42 changes: 42 additions & 0 deletions tests/data/well_perforations_2layers.csv
@@ -0,0 +1,42 @@
WELL_NAME,X,Y,Z,DATE,OPEN,LAYER_ID
A,1.0,1.0,1.0,2021-01-12 09:53:52.832254,True,0
A,2.0,2.0,2.0,2021-01-12 09:53:52.832254,False,0
A,1.0,1.0,1.0,2021-01-13 09:53:52.832254,False,0
A,2.0,2.0,2.0,2021-01-13 09:53:52.832254,True,0
B,3.0,3.0,3.0,2021-01-12 09:53:52.832254,True,0
B,4.0,4.0,4.0,2021-01-12 09:53:52.832254,True,0
B,3.0,3.0,3.0,2021-01-13 09:53:52.832254,False,0
B,4.0,4.0,4.0,2021-01-13 09:53:52.832254,False,0
C,5.0,5.0,5.0,2021-01-13 09:53:52.832254,False,0
C,6.0,6.0,6.0,2021-01-13 09:53:52.832254,True,0
D,7.0,7.0,7.0,2021-01-11 09:53:52.832254,False,0
D,7.0,7.0,7.0,2021-01-12 09:53:52.832254,False,0
D,7.0,7.0,7.0,2021-01-13 09:53:52.832254,True,0
E,8.0,8.0,8.0,2021-01-11 09:53:52.832254,True,0
E,8.0,8.0,8.0,2021-01-13 09:53:52.832254,False,0
F,9.0,9.0,9.0,2021-01-13 09:53:52.832254,False,1
G,10.0,10.0,10.0,2021-01-11 09:53:52.832254,True,1
G,11.0,11.0,11.0,2021-01-11 09:53:52.832254,True,1
G,10.0,10.0,10.0,2021-01-12 09:53:52.832254,True,1
G,11.0,11.0,11.0,2021-01-12 09:53:52.832254,False,1
H,12.0,12.0,12.0,2021-01-11 09:53:52.832254,True,1
H,13.0,13.0,13.0,2021-01-11 09:53:52.832254,False,1
H,12.0,12.0,12.0,2021-01-12 09:53:52.832254,True,1
H,13.0,13.0,13.0,2021-01-12 09:53:52.832254,True,1
I,14.0,14.0,14.0,2021-01-11 09:53:52.832254,True,1
I,14.0,14.0,14.0,2021-01-12 09:53:52.832254,True,1
I,14.0,14.0,14.0,2021-01-13 09:53:52.832254,True,1
J,15.0,15.0,15.0,2021-01-11 09:53:52.832254,True,1
J,16.0,16.0,16.0,2021-01-11 09:53:52.832254,True,1
J,17.0,17.0,17.0,2021-01-11 09:53:52.832254,True,1
J,15.0,15.0,15.0,2021-01-13 09:53:52.832254,True,1
J,16.0,16.0,16.0,2021-01-13 09:53:52.832254,False,1
J,17.0,17.0,17.0,2021-01-13 09:53:52.832254,True,1
K,18.0,18.0,4.0,2021-01-12 09:53:52.832254,True,0
K,18.0,18.0,4.0,2021-01-13 09:53:52.832254,True,0
K,18.0,18.0,7.0,2021-01-12 09:53:52.832254,True,0
K,18.0,18.0,7.0,2021-01-13 09:53:52.832254,True,0
K,18.0,18.0,10.0,2021-01-12 09:53:52.832254,True,1
K,18.0,18.0,10.0,2021-01-13 09:53:52.832254,True,1
K,18.0,18.0,12.0,2021-01-12 09:53:52.832254,True,1
K,18.0,18.0,12.0,2021-01-13 09:53:52.832254,True,1
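One possible way to exercise this fixture (a hypothetical snippet, not the actual unit test): synthetic well K penetrates both layers, so a layered strategy has to keep its LAYER_ID 0 and LAYER_ID 1 connections separate.

```python
import pandas as pd

from flownet.data.perforation_strategy import multiple

df = pd.read_csv(
    "tests/data/well_perforations_2layers.csv", parse_dates=["DATE"]
)
df_k = multiple(df[df["WELL_NAME"] == "K"])
print(sorted(df_k["LAYER_ID"].unique()))  # expected: [0, 1]
```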
