@@ -4,6 +4,7 @@
 
 import io
 import numpy as np
+import os
 import pandas as pd
 import warnings
 
@@ -19,30 +20,33 @@
 
 def _create_us_counties_df(st_to_state_name_dict, state_to_st_dict):
     # URLS
-    data_url = 'plotly/package_data/data/'
+    abs_file_path = os.path.realpath(__file__)
+    abs_dir_path = os.path.dirname(abs_file_path)
 
-    shape_pre2010 = 'gz_2010_us_050_00_500k/gz_2010_us_050_00_500k.shp'
-    shape_pre2010 = data_url + shape_pre2010
+    abs_plotly_dir_path = abs_dir_path[:abs_dir_path.find('/figure_factory')]
+    abs_package_data_dir_path = abs_plotly_dir_path + '/package_data/'
+
+    shape_pre2010 = 'gz_2010_us_050_00_500k.shp'
+    shape_pre2010 = abs_package_data_dir_path + shape_pre2010
     df_shape_pre2010 = gp.read_file(shape_pre2010)
     df_shape_pre2010['FIPS'] = (df_shape_pre2010['STATE'] +
                                 df_shape_pre2010['COUNTY'])
     df_shape_pre2010['FIPS'] = pd.to_numeric(df_shape_pre2010['FIPS'])
 
-    states_path = 'cb_2016_us_state_500k/cb_2016_us_state_500k.shp'
-    states_path = data_url + states_path
+    states_path = 'cb_2016_us_state_500k.shp'
+    states_path = abs_package_data_dir_path + states_path
 
     # state df
     df_state = gp.read_file(states_path)
     df_state = df_state[['STATEFP', 'NAME', 'geometry']]
     df_state = df_state.rename(columns={'NAME': 'STATE_NAME'})
 
-    county_url = 'plotly/package_data/data/cb_2016_us_county_500k/'
     filenames = ['cb_2016_us_county_500k.dbf',
                  'cb_2016_us_county_500k.shp',
                  'cb_2016_us_county_500k.shx']
 
     for j in range(len(filenames)):
-        filenames[j] = county_url + filenames[j]
+        filenames[j] = abs_package_data_dir_path + filenames[j]
 
     dbf = io.open(filenames[0], 'rb')
     shp = io.open(filenames[1], 'rb')
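
Note on the hunk above: the old code built shapefile paths from the relative prefix 'plotly/package_data/data/', which only resolves when the process is started from a directory where that relative path exists; the new code derives the paths from the module's own location via os.path.realpath(__file__). A minimal standalone sketch of the same idea, assuming a hypothetical package_data/ directory sitting next to the module (the names and layout here are illustrative, not plotly's actual ones):

    import os

    def package_data_path(filename):
        # Directory that contains this module, independent of the
        # process's current working directory.
        module_dir = os.path.dirname(os.path.realpath(__file__))
        # Hypothetical layout: data files live in package_data/ next to this module.
        return os.path.join(module_dir, 'package_data', filename)

    # e.g. gp.read_file(package_data_path('gz_2010_us_050_00_500k.shp'))
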
@@ -638,14 +642,14 @@ def create_choropleth(fips, values, scope=['usa'], binning_endpoints=None, |
             list(np.linspace(0, 1, viri_len))
         )[1:-1]
 
-        for l in np.linspace(0, 1, len(LEVELS)):
+        for L in np.linspace(0, 1, len(LEVELS)):
             for idx, inter in enumerate(viri_intervals):
-                if l == 0:
+                if L == 0:
                     break
-                elif inter[0] < l <= inter[1]:
+                elif inter[0] < L <= inter[1]:
                     break
 
-            intermed = ((l - viri_intervals[idx][0]) /
+            intermed = ((L - viri_intervals[idx][0]) /
                         (viri_intervals[idx][1] - viri_intervals[idx][0]))
 
             float_color = colors.find_intermediate_color(
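
The second hunk is a rename only (l becomes L, apparently to avoid the ambiguous single-letter name that linters flag, e.g. flake8 E741); the surrounding logic maps each level in [0, 1] to the colorscale interval that contains it and to its normalized position within that interval, which is then used as the mixing weight for the color interpolation. A rough standalone sketch of that lookup, using made-up interval endpoints rather than the viri_intervals computed in the real function:

    import numpy as np

    # Made-up, evenly spaced intervals standing in for viri_intervals.
    intervals = [(0.0, 0.25), (0.25, 0.5), (0.5, 0.75), (0.75, 1.0)]

    for level in np.linspace(0, 1, 5):
        for idx, (lo, hi) in enumerate(intervals):
            if level == 0 or lo < level <= hi:
                break
        # Fractional position of the level inside its interval; the real code
        # passes this to colors.find_intermediate_color as the mixing weight.
        intermed = ((level - intervals[idx][0]) /
                    (intervals[idx][1] - intervals[idx][0]))
        print(idx, round(intermed, 3))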