import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import panel as pn
import re
from openpyxl import load_workbook
from io import BytesIO
from datetime import datetime
import param
import hvplot.pandas
from io import StringIO
import html
import holoviews as hv
from holoviews import opts
from bokeh.plotting import figure, show
from bokeh.models import TextInput, HoverTool, WheelZoomTool, LinearAxis, Range1d, ColumnDataSource, NumeralTickFormatter, LabelSet, Legend, LegendItem, CategoricalColorMapper, FactorRange, Title, DatetimeTickFormatter, CustomJS, CustomJSHover, CDSView, BooleanFilter, HTMLTemplateFormatter, ImageURL, Div, CustomJSTickFormatter
from bokeh.palettes import Category10, Category20
from bokeh.layouts import column, row
import warnings
import bokeh.plotting as bkp
import base64
import io
import os
from PIL import Image
from bokeh.transform import factor_cmap
from bokeh.models.glyphs import VBar # Add this import
from bokeh.palettes import Category20c
from bokeh.transform import cumsum
import numpy as np
###################################################################################################################################
# Enable the Panel extension
pn.extension()
# Suppress all warnings
warnings.filterwarnings("ignore")
# Load the Tabulator extension
pn.extension('tabulator')
# Initialize HoloViews and Panel extensions
hv.extension('bokeh')
##############################################################################################################################
# Define date and path
##############################################################################################################################
# Define paths and file names
input_file_formatted = 'CM-Transfer_Project-Overview.xlsx'
##############################################################################################################################
# Load workbook
##############################################################################################################################
# Load the Excel files into pandas DataFrames
try:
df_Summary = pd.read_excel(input_file_formatted, sheet_name='Summary', index_col=False)
df_Priority = pd.read_excel(input_file_formatted, sheet_name='CM-Priority', index_col=False)
df_Snapshot = pd.read_excel(input_file_formatted, sheet_name='Snapshot', index_col=False)
df_TurnoverReport = pd.read_excel(input_file_formatted, sheet_name='CM-TurnoverReport', index_col=False)
df_Backlog = pd.read_excel(input_file_formatted, sheet_name='CM-Backlog', index_col=False)
df_WIP = pd.read_excel(input_file_formatted, sheet_name='CM-WIP', index_col=False)
df_PendingReport = pd.read_excel(input_file_formatted, sheet_name='PendingReport', index_col=False)
df_ADCNReport = pd.read_excel(input_file_formatted, sheet_name='CM-ADCNReport', index_col=False)
df_Historic = pd.read_excel(input_file_formatted, sheet_name='CM-Historic', index_col=False)
print("Input files loaded successfully.")
except FileNotFoundError as e:
print(f"File not found: {e}")
exit()
# Load the workbook
try:
workbook = load_workbook(input_file_formatted)
print(f"Successfully loaded '{input_file_formatted}'")
except FileNotFoundError as e:
print(f"File not found: {e}")
exit()
#--------------------------------------------------
# Define 'file date' based on the value of W2 in df_Backlog
#--------------------------------------------------
# Open the workbook
workbook = load_workbook(input_file_formatted, data_only=True) # 'data_only' returns the cached formula results rather than the formulas themselves
backlog_sheet = workbook["CM-Backlog"] # Load the CM-Backlog sheet
# Extract file date from cell W2
file_date = backlog_sheet["W2"].value
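# Note: with data_only=True, openpyxl reads the formula result cached in the file; if the workbook
# was last written by a tool that does not store that cache, W2 can come back as None, which the
# check below reports.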
if file_date:
print("File Date:", file_date)
else:
print("Could not retrieve File Date from W2.")
# Close the workbook
workbook.close()
################################################################
# Filter out "Canceled" or "To be transferred" from df_priority
################################################################
# Filter out rows where 'Production Status' is 'Canceled' or 'To be transferred' or 'Officially transferred'
if 'Production Status' in df_Priority.columns:
df_Priority = df_Priority[~df_Priority['Production Status'].isin(['Canceled', 'To be transferred', 'Officially transferred'])]
#----------------------------------------------------------
# 02/11 - Replace 'Phase 4' or 'Phase 5' with 'Phase 4-5'
#----------------------------------------------------------
# For df_Priority
if 'Program' in df_Priority.columns and 'Pty Indice' in df_Priority.columns:
mask = (
df_Priority['Program'].isin(['Phase 4', 'Phase 5']) &
~df_Priority['Pty Indice'].str.contains('Phase5', na=False)
)
df_Priority.loc[mask, 'Program'] = 'Phase 4-5'
# For df_Backlog
if 'Program' in df_Backlog.columns and 'Pty Indice' in df_Backlog.columns:
mask = (
df_Backlog['Program'].isin(['Phase 4', 'Phase 5']) &
~df_Backlog['Pty Indice'].str.contains('Phase5', na=False)
)
df_Backlog.loc[mask, 'Program'] = 'Phase 4-5'
# For df_TurnoverReport
if 'Program' in df_TurnoverReport.columns and 'Pty Indice' in df_TurnoverReport.columns:
mask = (
df_TurnoverReport['Program'].isin(['Phase 4', 'Phase 5']) &
~df_TurnoverReport['Pty Indice'].str.contains('Phase5', na=False)
)
df_TurnoverReport.loc[mask, 'Program'] = 'Phase 4-5'
# For df_Historic
if 'Program' in df_Historic.columns and 'Pty Indice' in df_Historic.columns:
mask = (
df_Historic['Program'].isin(['Phase 4', 'Phase 5']) &
~df_Historic['Pty Indice'].str.contains('Phase5', na=False)
)
df_Historic.loc[mask, 'Program'] = 'Phase 4-5'
# For df_Snapshot (new addition)
if 'Program' in df_Snapshot.columns and 'Pty Indice' in df_Snapshot.columns:
mask = (
df_Snapshot['Program'].isin(['Phase 4', 'Phase 5']) &
~df_Snapshot['Pty Indice'].str.contains('Phase5', na=False)
)
df_Snapshot.loc[mask, 'Program'] = 'Phase 4-5'
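# Optional refactor sketch (hypothetical, not called anywhere): the five blocks above apply the
# same 'Phase 4'/'Phase 5' -> 'Phase 4-5' relabeling to each DataFrame, so a helper like this
# could remove the repetition. Shown for illustration only; the explicit blocks above remain the
# working code.
def merge_phase_4_5(df):
    """Relabel 'Phase 4'/'Phase 5' rows as 'Phase 4-5', except indices flagged as 'Phase5'."""
    if 'Program' in df.columns and 'Pty Indice' in df.columns:
        mask = (
            df['Program'].isin(['Phase 4', 'Phase 5']) &
            ~df['Pty Indice'].str.contains('Phase5', na=False)
        )
        df.loc[mask, 'Program'] = 'Phase 4-5'
    return df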
#----------------------------------------------------------
#*****************************************************************************************************************************
# |General Overview| - Table creation
#*****************************************************************************************************************************
#--------------------------------------------------------------------------
# Create pivot_table_combined
#--------------------------------------------------------------------------
# Create a new column 'Industrialization'
df_Snapshot['Industrialization'] = df_Snapshot['Production Status'].apply(
lambda x: 'Industrialized' if x.strip() in ['Industrialized', 'Completed'] else 'Not Industrialized'
)
# Fill empty 'Qty WIP' with 0
df_Snapshot['Qty WIP'] = df_Snapshot['Qty WIP'].fillna(0)
#######################################################
# Assuming df_Priority has the columns 'Pty Indice' and 'Priority'
priority_mapping = df_Priority.set_index('Pty Indice')['Priority'].to_dict()
# Create the pivot table without 'Priority'
pivot_table_13 = pd.pivot_table(df_Snapshot,
index=['Top-Level Status', 'Industrialization', 'Product Category', 'Pty Indice'],
values=['IDD Backlog Qty', 'Remain. crit. Qty', 'Qty clear to build', 'Qty WIP', 'Shipped', 'Critical Qty'],
aggfunc='sum',
fill_value=0).reset_index()
# Add the 'Priority' column to pivot_table_13 using the mapping
pivot_table_13['Priority'] = pivot_table_13['Pty Indice'].map(priority_mapping)
###################### New 08/12 #########################################
# Add the 'Program' column to pivot_table_13 using the mapping
program_mapping = df_Priority.set_index('Pty Indice')['Program'].to_dict()
pivot_table_13['Program'] = pivot_table_13['Pty Indice'].map(program_mapping)
#########################################################
# Merge df_Backlog with df_Snapshot to get additional columns
merged_df = pd.merge(df_Backlog, df_Snapshot[['Pty Indice', 'Qty clear to build', 'Top-Level Status', 'Qty WIP', 'Industrialization', 'Product Category', 'Shipped', 'Critical Qty', 'Priority']], on='Pty Indice', how='left')
# Rename 'Backlog row Qty' to 'IDD Backlog Qty'
merged_df.rename(columns={'Backlog row Qty': 'IDD Backlog Qty'}, inplace=True)
# Create 'Order Type' column
merged_df['Order Type'] = merged_df['Order'].apply(lambda x: 'DX/DO' if str(x).startswith('D') else ('Standard' if 'NC' not in str(x) else None))
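# For example (hypothetical order numbers): 'D0012345' -> 'DX/DO', 'SO98765' -> 'Standard',
# and a non-D order containing 'NC' (e.g. 'NC-1001') -> None, so NC orders drop out of the
# Order Type pivot below.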
# Aggregate IDD Backlog Qty by 'Pty Indice', 'Order Type', and other relevant columns
unique_merged_df = merged_df.groupby(
['Pty Indice', 'Top-Level Status', 'Industrialization', 'Product Category', 'Order Type']
).agg({
'IDD Backlog Qty': 'sum', # Sum IDD Backlog Qty for unique combinations
'Qty clear to build': 'sum',
'Remain. crit. Qty': 'sum',
'Qty WIP': 'sum',
}).reset_index()
# Create a pivot table to separate IDD Backlog Qty by Order Type
pivot_order_type = unique_merged_df.pivot_table(
index=['Pty Indice', 'Top-Level Status', 'Industrialization', 'Product Category'],
columns='Order Type',
values='IDD Backlog Qty',
aggfunc='sum', # Sum values to ensure accurate totals
fill_value=0 # Fill NaNs with 0
).reset_index()
# Flatten MultiIndex columns
pivot_order_type.columns = [f'{col[0]}_{col[1]}' if col[1] != '' else col[0] for col in pivot_order_type.columns]
# Rename columns to be more descriptive
pivot_order_type.columns = ['Pty Indice', 'Top-Level Status', 'Industrialization', 'Product Category', 'DX_Order_Type', 'Standard_Order_Type']
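# Note: this hard-coded column list assumes both 'DX/DO' and 'Standard' order types exist in the
# data; if one is missing, pivot_order_type has fewer columns and this assignment raises a
# length-mismatch error. A defensive alternative would be to rename only the columns that exist.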
# Merge the two pivot tables
pivot_table_combined = pd.merge(
pivot_table_13,
pivot_order_type,
on=['Pty Indice', 'Top-Level Status', 'Industrialization', 'Product Category'],
how='left'
)
# Rename the remaining columns for clarity
pivot_table_combined.rename(columns={
'Top-Level Status_x': 'Top-Level Status',
'Industrialization_x': 'Industrialization',
'Product Category_x': 'Product Category',
'DX_Order_Type': 'DPAS Order',
'Standard_Order_Type':'Standard Order',
'Shipped':'Qty Shipped',
'Critical Qty': 'Total Critical Qty'
}, inplace=True)
# Reorder pivot_table_combined columns into the order of relevance used by the bar chart
# Define the desired column order
desired_column_order = [
'Industrialization',
'Top-Level Status',
'Product Category',
'Priority',
'Pty Indice',
'Standard Order',
'DPAS Order',
'Qty WIP',
'Qty clear to build',
'Total Critical Qty',
'Qty Shipped',
'Remain. crit. Qty',
'IDD Backlog Qty'
]
# Reorder the columns in pivot_table_combined
pivot_table_combined = pivot_table_combined[desired_column_order]
# Ensure 'Priority' is in the correct data type (int or float)
# Convert 'Priority' column to numeric, coercing errors (non-numeric entries will become NaN)
pivot_table_combined['Priority'] = pd.to_numeric(pivot_table_combined['Priority'], errors='coerce')
pivot_table_combined['Priority'].fillna(999, inplace=True)
# Fill NaN values with 0 in all remaining columns (other than 'Priority')
pivot_table_combined.fillna(0, inplace=True)
# Define the custom sort orders, including the additional categories
industrialization_order = pivot_table_combined['Industrialization'].unique().tolist()
top_level_status_order = pivot_table_combined['Top-Level Status'].unique().tolist() + ['All Top-Level Status']
product_category_order = pivot_table_combined['Product Category'].unique().tolist() + ['All Product Categories']
# Set the categories and order for sorting
pivot_table_combined['Industrialization'] = pd.Categorical(
pivot_table_combined['Industrialization'],
categories=pivot_table_combined['Industrialization'].unique().tolist(),
ordered=True
)
pivot_table_combined['Top-Level Status'] = pd.Categorical(
pivot_table_combined['Top-Level Status'],
categories=pivot_table_combined['Top-Level Status'].unique().tolist(),
ordered=True
)
pivot_table_combined['Product Category'] = pd.Categorical(
pivot_table_combined['Product Category'],
categories=pivot_table_combined['Product Category'].unique().tolist(),
ordered=True
)
# Sort by Priority; specify na_position if needed (e.g., na_position='last')
pivot_table_combined.sort_values(by=['Priority', 'Pty Indice'], inplace=True, na_position='last')
#--------------------------------------------------------------------------
# Create pivot_table_14
#--------------------------------------------------------------------------
##############################################################################################################################
# Financial KPI DataFrame to be updated with df_Historic
# --> To be updated (09/23) to use df_Historic instead of df_Snapshot to calculate 'Realized sales' and 'Realized Margin'. The calculation should be based on the real data from the df_Historic turnover report, including price changes over time.
### 'IDD Current Sales (Total)' and 'IDD Current Margin (Total)' should be based on df_Historic --> an average of the sales and margin should work
# --> Use 'IDD AVG realized sales price [USD]' & 'IDD AVG realized Margin [%]' instead of 'IDD Current Sales (Total)' & 'IDD Current Margin (Total)'
# New columns introduced in df_Snapshot:
# df_snapshot['IDD AVG realized sales price [USD]']
# df_snapshot['IDD AVG realized Margin Standard [USD]']
# df_snapshot['IDD AVG realized Margin [%]']
##############################################################################################################################
# Creating Graph#14 [IDD Expected Total Sales & IDD Marge per Pty Indice by Top-Level Status, Production Status & Product Category]
##############################################################################################################################
# Calculate 'IDD Expected Total Sales'
df_Snapshot['IDD Expected Total Sales'] = df_Snapshot['IDD Backlog Qty'] * df_Snapshot['IDD Sale Price']
# Calculate 'IDD Current Sales (Total)'
df_Snapshot['IDD Current Sales (Total)'] = df_Snapshot['Shipped'] * df_Snapshot['IDD Sale Price']
# Calculate 'IDD Expected Total Margin'
df_Snapshot['IDD Expected Total Margin'] = df_Snapshot['IDD Backlog Qty'] * df_Snapshot['IDD Marge Standard (unit)']
# Calculate 'IDD Current Margin (Total)'
df_Snapshot['IDD Current Margin (Total)'] = df_Snapshot['Shipped'] * df_Snapshot['IDD Marge Standard (unit)']
df_Snapshot['Industrialization'] = df_Snapshot['Production Status'].apply(
lambda x: 'Industrialized' if x.strip() in ['Industrialized', 'Completed'] else 'Not Industrialized'
)
##############################################################################################################################
# Assuming df_Priority has the columns 'Pty Indice' and 'Priority'
priority_mapping = df_Priority.set_index('Pty Indice')['Priority'].to_dict()
# Create the pivot table without 'Priority'
pivot_table_14 = pd.pivot_table(df_Snapshot,
index=['Top-Level Status', 'Industrialization', 'Product Category', 'Pty Indice'],
values=['IDD Expected Total Sales', 'IDD Expected Total Margin', 'IDD Current Margin (%)', 'Priority', 'Critical Qty', 'Shipped', 'IDD Backlog Qty', 'IDD Current Sales (Total)', 'IDD Current Margin (Total)', 'IDD Expected ROI (Total)', 'IDD AVG realized sales price [USD]', 'IDD AVG realized Margin Standard [USD]', 'IDD AVG realized Margin [%]'],
aggfunc='sum',
fill_value=0).reset_index()
# Add the 'Priority' column to pivot_table_14 using the mapping
pivot_table_14['Priority'] = pivot_table_14['Pty Indice'].map(priority_mapping)
# Add the 'Program' column to pivot_table_14 using the mapping
pivot_table_14['Program'] = pivot_table_14['Pty Indice'].map(program_mapping)
# Map 'DPAS Order' from pivot_table_combined on 'Pty Indice' column
pivot_table_14 = pivot_table_14.merge(pivot_table_combined[['Pty Indice', 'DPAS Order']], on='Pty Indice', how='left')
# Calculate '% Completion' and round to one decimal place
pivot_table_14['% Completion'] = round((pivot_table_14['Shipped'] / pivot_table_14['Critical Qty']) * 100, 1)
pivot_table_14['% Completion Total Backlog'] = round((pivot_table_14['Shipped'] / (pivot_table_14['IDD Backlog Qty'] + pivot_table_14['Shipped'])) * 100, 1) # New 09/26, updated 10/08 because Total Backlog should be 'IDD Backlog Qty' + 'Shipped' to consider the initial backlog
# Calculate '% DPAS Order' and round to one decimal place
pivot_table_14['% DPAS Order'] = round((pivot_table_14['DPAS Order'] / pivot_table_14['IDD Backlog Qty']) * 100, 1)
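# Note: these ratios assume non-zero denominators; where 'Critical Qty' or 'IDD Backlog Qty' is 0,
# the division yields inf or NaN, which flows into the tables below as-is.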
# Define the sort order for both columns
industrialization_order = ['Industrialized', 'Not Industrialized']
top_level_status_order = ['Clear-to-Build', 'Short', 'Completed - No Backlog'] # Update 09/16
# Set the categories and order for sorting
pivot_table_14['Industrialization'] = pd.Categorical(pivot_table_14['Industrialization'], categories=industrialization_order, ordered=True)
pivot_table_14['Top-Level Status'] = pd.Categorical(pivot_table_14['Top-Level Status'], categories=top_level_status_order, ordered=True)
# Sort by Industrialization, then by Top-Level Status, and finally by Product Category
pivot_table_14.sort_values(by=['Industrialization', 'Top-Level Status', 'Product Category'], inplace=True)
# Remove '%' from 'IDD Current Margin (%)' and convert to float
pivot_table_14['IDD Current Margin (%)'] = pivot_table_14['IDD Current Margin (%)'].str.replace('%', '').astype(float)
# Round the values to one decimal place
pivot_table_14['IDD Current Margin (%)'] = pivot_table_14['IDD Current Margin (%)'].round()
# Convert 'IDD Expected ROI (Total)' to string, replacing NaN with '0%'
pivot_table_14['IDD Expected ROI (Total)'] = pivot_table_14['IDD Expected ROI (Total)'].fillna('0%').astype(str)
# Remove '%' and convert to float
pivot_table_14['IDD Expected ROI (Total)'] = pivot_table_14['IDD Expected ROI (Total)'].str.replace('%', '').astype(float)
# Round the values to one decimal place
pivot_table_14['IDD Expected ROI (Total)'] = pivot_table_14['IDD Expected ROI (Total)'].round()
# Round the values to one decimal place
pivot_table_14['IDD Expected Total Sales'] = pivot_table_14['IDD Expected Total Sales'].round()
pivot_table_14['IDD Expected Total Margin'] = pivot_table_14['IDD Expected Total Margin'].round()
pivot_table_14['IDD Current Sales (Total)'] = pivot_table_14['IDD Current Sales (Total)'].round()
pivot_table_14['IDD Current Margin (Total)'] = pivot_table_14['IDD Current Margin (Total)'].round()
#######################################################################
# Step 1: Clean and convert 'IDD AVG realized Margin [%]'
pivot_table_14['IDD AVG realized Margin [%]'] = pivot_table_14['IDD AVG realized Margin [%]'].str.replace('%', '').astype(float)
# Round the margin to 1 decimal place
pivot_table_14['IDD AVG realized Margin [%]'] = pivot_table_14['IDD AVG realized Margin [%]'].round(1)
# Step 2: Clean and convert 'IDD AVG realized Margin Standard [USD]'
# Remove '$' and ',' before conversion
pivot_table_14['IDD AVG realized Margin Standard [USD]'] = (
pivot_table_14['IDD AVG realized Margin Standard [USD]']
.replace({'\\$': '', ',': ''}, regex=True)
)
# Use pd.to_numeric to handle conversion and coercion
pivot_table_14['IDD AVG realized Margin Standard [USD]'] = pd.to_numeric(pivot_table_14['IDD AVG realized Margin Standard [USD]'], errors='coerce').round(2)
# Step 3: Clean and convert 'IDD AVG realized sales price [USD]'
pivot_table_14['IDD AVG realized sales price [USD]'] = (
pivot_table_14['IDD AVG realized sales price [USD]']
.replace({'\\$': '', ',': ''}, regex=True)
)
# Use pd.to_numeric to handle conversion and coercion
pivot_table_14['IDD AVG realized sales price [USD]'] = pd.to_numeric(pivot_table_14['IDD AVG realized sales price [USD]'], errors='coerce').round(2)
# Step 4: Calculate 'IDD Realized Sales'
pivot_table_14['IDD Realized Sales'] = pivot_table_14['IDD AVG realized sales price [USD]'] * pivot_table_14['Shipped']
# Step 5: Calculate 'IDD Realized Margin'
pivot_table_14['IDD Realized Margin'] = pivot_table_14['IDD AVG realized Margin Standard [USD]'] * pivot_table_14['Shipped']
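# Note: Steps 6-8 below convert the AVG realized columns back into display strings ('$x,xxx.xx',
# 'x.x%'), so any further arithmetic on them has to happen before that formatting, as Steps 4-5 do.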
# Step 6: Format 'IDD AVG realized sales price [USD]' as currency and replace NaN with 0
pivot_table_14['IDD AVG realized sales price [USD]'] = pivot_table_14['IDD AVG realized sales price [USD]'].apply(lambda x: f"${x:,.2f}" if pd.notna(x) else '$0.00')
# Step 7: Format 'IDD AVG realized Margin Standard [USD]' as currency and replace NaN with 0
pivot_table_14['IDD AVG realized Margin Standard [USD]'] = pivot_table_14['IDD AVG realized Margin Standard [USD]'].apply(lambda x: f"${x:,.2f}" if pd.notna(x) else '$0.00')
# Step 8: Format 'IDD AVG realized Margin [%]' as percentage and replace NaN with 0
pivot_table_14['IDD AVG realized Margin [%]'] = pivot_table_14['IDD AVG realized Margin [%]'].apply(lambda x: f"{x:.1f}%" if pd.notna(x) else '0.0%')
# Ensure 'Priority' is in the correct data type (int or float)
pivot_table_14['Priority'] = pd.to_numeric(pivot_table_14['Priority'], errors='coerce')
# Sort the DataFrame by 'Priority' in ascending order (use ascending=False for descending order)
#pivot_table_14 = pivot_table_14.sort_values(by='Priority', ascending=True)
pivot_table_14.sort_values(by=['Priority', 'Pty Indice'], inplace=True)
#--------------------------------------------------------------------------
# Create pivot_table_15 - Production metrics Expected Time, Actual Time, Standard Deviation
#--------------------------------------------------------------------------
# Create df_MakeArchi from the |CM-MakeArchitecture| sheet
df_MakeArchi = pd.read_excel(input_file_formatted, sheet_name='CM-MakeArchitecture', index_col=False)
# df_Production = df_Snapshot['Priority', 'Pty Indice', 'Program', 'Expected Time [hour]', 'Actual Time [hour]', 'Standard Deviation [hour]']
df_Production = df_Snapshot.copy()
#Selected relevant columns
df_Production = df_Production[['Top-Level Status', 'Priority', 'Pty Indice', 'IDD Top Level', 'SEDA Top Level', 'Production Status', 'Product Category', 'Program', 'Max Expected Time (full ASSY)[hour]', 'Avg Actual Time (full ASSY)[hour]', 'Max Standard Deviation [hour]', 'Total WO Count']]
# Rename the selected columns
df_Production = df_Production.rename(columns={
'Max Expected Time (full ASSY)[hour]': 'Expected Time',
'Avg Actual Time (full ASSY)[hour]': 'Actual Time',
'Max Standard Deviation [hour]': 'Standard Deviation',
})
df_Production['Industrialization'] = df_Production['Production Status'].apply(
lambda x: 'Industrialized' if x.strip() in ['Industrialized', 'Completed'] else 'Not Industrialized'
)
# Ensure 'Priority' is in the correct data type (int or float)
df_Production['Priority'] = pd.to_numeric(df_Production['Priority'], errors='coerce')
# Define sort orders
top_level_status_order_prod = sorted(df_Production['Top-Level Status'].unique().tolist()) # Ensure it's unique and sorted
product_category_order_prod = sorted(df_Production['Product Category'].unique().tolist()) # Ensure it's unique and sorted
# Fill NaN priorities with a default value of 999 so they sort last
df_Production['Priority'].fillna(999, inplace=True)
# Fill NaN values with 0 in all remaining columns (other than 'Priority')
df_Production.fillna(0, inplace=True)
# Convert 'Priority' column to integers
df_Production['Priority'] = df_Production['Priority'].astype(int)
# Set categorical data type for 'Industrialization' column
df_Production['Industrialization'] = pd.Categorical(
df_Production['Industrialization'],
categories=['Industrialized', 'Not Industrialized'],
ordered=False
)
# Set categorical data types with the specified sort orders for other columns
df_Production['Top-Level Status'] = pd.Categorical(
df_Production['Top-Level Status'],
categories=top_level_status_order_prod,
ordered=False
)
df_Production['Product Category'] = pd.Categorical(
df_Production['Product Category'],
categories=product_category_order_prod,
ordered=False
)
# Sort by Priority; specify na_position if needed (e.g., na_position='last')
df_Production.sort_values(by=['Priority', 'Pty Indice'], inplace=True, na_position='last')
###########################################################################################################################################
## Map 'WO_Count' from Top-Level (Level = 0) from |CM-MakeArchitecture| (df_MakeArchi) on 'Pty Indice' and rename 'Top-Level WO Count'
## Map 'Avg Actual Time (unit)[hour]' from Top-Level (Level = 0) from |CM-MakeArchitecture| (df_MakeArchi) on 'Pty Indice' and rename 'Actual Time (Top-Level only)'
###########################################################################################################################################
# Include 'Total Top-Level Qty' and 'Total Components Qty' in the table: 'Top-Level WO Count' * 'Qty per WO' and 'Total WO Count' * 'Total Components Qty'
# Filter df_MakeArchi where Level == 0 to get Top-Level
top_level_df = df_MakeArchi[df_MakeArchi['Level'] == 0]
# Filter df_MakeArchi where Level != 0 to get sub-Level
sub_level_df = df_MakeArchi[df_MakeArchi['Level'] != 0]
# Create a dictionary mapping Pty Indice to WO_Count and Avg Actual Time (unit)[hour]
wo_count_top_level_mapping = dict(zip(top_level_df['Pty Indice'], top_level_df['WO_Count']))
avg_actual_time_mapping = dict(zip(top_level_df['Pty Indice'], top_level_df['Avg Actual Time (unit)[hour]']))
# Map 'Top-Level WO Count' to df_Production using the mapping dictionary
df_Production['Top-Level WO Count'] = df_Production['Pty Indice'].map(wo_count_top_level_mapping)
df_Production['Actual Time (Top-Level only)'] = df_Production['Pty Indice'].map(avg_actual_time_mapping)
# New 09/10 --> Include 'Total Top-Level Qty' and 'Total Components Qty' in the table: 'Top-Level WO Count' * 'Qty per WO' and 'Total WO Count' * 'Total Components Qty'
qty_top_level_count_mapping = dict(zip(top_level_df['Pty Indice'], top_level_df['Qty_Count']))
qty_sub_level_count_mapping = dict(zip(sub_level_df['Pty Indice'], sub_level_df['Qty_Count']))
df_Production['Total Top-Level Qty'] = df_Production['Pty Indice'].map(qty_top_level_count_mapping)
df_Production['Total sub-Level Qty'] = df_Production['Pty Indice'].map(qty_sub_level_count_mapping)
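# Note: dict(zip(...)) keeps only the last value when a 'Pty Indice' appears on several rows of
# top_level_df or sub_level_df; if several sub-level rows per indice are expected, a
# groupby('Pty Indice')['Qty_Count'].sum() mapping would presumably be the intended aggregation.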
# Fill NaN values in specific columns with 0
df_Production[['Total WO Count', 'Top-Level WO Count', 'Total Top-Level Qty', 'Total sub-Level Qty']] = df_Production[['Total WO Count', 'Top-Level WO Count', 'Total Top-Level Qty', 'Total sub-Level Qty']].fillna(0)
# Ensure there are no infinite values (inf) in the columns
df_Production[['Total WO Count', 'Top-Level WO Count', 'Total Top-Level Qty', 'Total sub-Level Qty']] = df_Production[['Total WO Count', 'Top-Level WO Count', 'Total Top-Level Qty', 'Total sub-Level Qty']].replace([np.inf, -np.inf], 0)
# Convert 'Total WO Count' and 'Top-Level WO Count' to integer
df_Production['Total WO Count'] = df_Production['Total WO Count'].astype(int)
df_Production['Top-Level WO Count'] = df_Production['Top-Level WO Count'].astype(int)
df_Production['Total Top-Level Qty'] = df_Production['Total Top-Level Qty'].astype(int)
df_Production['Total sub-Level Qty'] = df_Production['Total sub-Level Qty'].astype(int)
# Optionally fill NaNs in 'Actual Time (Top-Level only)' with 0 or another value if necessary
df_Production['Actual Time (Top-Level only)'] = df_Production['Actual Time (Top-Level only)'].fillna(0)
# Round(1) 'Expected Time', 'Actual Time, 'Standard Deviation', 'Actual Time (Top-Level only)'
df_Production['Expected Time'] = df_Production['Expected Time'].round(1)
df_Production['Actual Time'] = df_Production['Actual Time'].round(1)
df_Production['Standard Deviation'] = df_Production['Standard Deviation'].round(1)
df_Production['Actual Time (Top-Level only)'] = df_Production['Actual Time (Top-Level only)'].round(1)
# Rename 'Expected Time' to 'Standard Time' for better clarity on the graph
df_Production.rename(columns={'Expected Time': 'Standard Time (Routing, full ASSY)'}, inplace=True)
df_Production.rename(columns={'Actual Time': 'Actual Time (AVG Prod, full ASSY)'}, inplace=True)
df_Production.rename(columns={'Standard Deviation': 'Standard Deviation (on Actual Time, full ASSY)'}, inplace=True)
df_Production.rename(columns={'Actual Time (Top-Level only)': 'Actual Time (AVG Prod, Top-Level only)'}, inplace=True)
# Create pivot_table_15 based on df_Production, only for |General Overview|; only keep pivot_table_15['Program'] == 'Phase 4'
pivot_table_15 = df_Production.copy()
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#*****************************************************************************************************************************
##############################################################################################################################
# |4 cadrans|
##############################################################################################################################
#*****************************************************************************************************************************
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
# Create titles for each quadrant
engineering_title = "Engineering"
sales_title = "Sales"
supply_chain_title = "Supply Chain"
production_title = "Production"
# Create a mapping dictionary from df_Priority
indice_to_program = dict(zip(df_Priority['Pty Indice'], df_Priority['Program']))
indice_to_priority = dict(zip(df_Priority['Pty Indice'], df_Priority['Priority']))
# Apply mapping to create 'Program' column in df_Summary and df_Backlog
df_Summary['Program'] = df_Summary['Pty Indice'].map(indice_to_program)
df_Backlog['Program'] = df_Backlog['Pty Indice'].map(indice_to_program)
df_TurnoverReport['Program'] = df_TurnoverReport['Pty Indice'].map(indice_to_program)
df_Snapshot['Program'] = df_Snapshot['Pty Indice'].map(indice_to_program)
df_WIP['Program'] = df_WIP['Pty Indice'].map(indice_to_program)
df_PendingReport['Priority'] = df_PendingReport['Pty Indice'].map(indice_to_priority)
###################################################
# Renaming
###################################################
# Rename certain columns and assign back to df_WIP
df_WIP = df_WIP.rename(columns={
'Qty Ordered': 'WO Qty',
})
# Rename certain columns and assign back to df_Backlog
df_Backlog = df_Backlog.rename(columns={
#'Backlog row Qty': 'Qty',
'Backlog row Qty': 'Backlog Qty',
'Remain. crit. Qty': 'Rem. Qty'
})
# Rename certain columns
df_TurnoverReport = df_TurnoverReport.rename(columns={
'TurnoverReport row Qty': 'Qty',
})
# Rename certain columns
df_Summary = df_Summary.rename(columns={
'Remain. crit. Qty': 'Rem. Qty',
'Max Qty (GS)': 'Qty (GS/BOM)'
})
#################################################################################################################
# Widgets initialization
################################################################################################################
# Check all entries with priority '3'
#print(df_Priority[df_Priority['Priority'] == 3]) # Adjust to match your data type
# Example usage with defaults
#default_program = 'Phase 4'
#default_priority = 3 # Use integer for consistency
#default_indice = 'P3A'
default_program = 'Phase 4-5'
default_priority = 5
default_indice = 'P5'
# Function to filter priorities based on program selection
def filter_priorities(program):
if program == 'All':
priorities = ['All'] + df_Priority['Priority'].unique().tolist()
else:
priorities = df_Priority[df_Priority['Program'] == program]['Priority'].unique().tolist()
return priorities
''' Update 09/26 to remove 'All' from indice_widget
# Function to filter indices based on priority selection
def filter_indices(priority):
if priority == 'All':
indices = ['All'] + df_Priority['Pty Indice'].unique().tolist()
else:
# Filter indices based on the selected priority
indices = ['All'] + df_Priority[df_Priority['Priority'] == priority]['Pty Indice'].unique().tolist()
return indices
'''
# Function to filter indices based on priority selection
def filter_indices(priority):
if priority == 'All':
indices = df_Priority['Pty Indice'].unique().tolist() # Remove 'All' option here
else:
# Filter indices based on the selected priority
indices = df_Priority[df_Priority['Priority'] == priority]['Pty Indice'].unique().tolist()
return indices
# Initialize program widget, excluding NaN values
unique_programs = df_Priority['Program'].dropna().unique().tolist()
program_widget = pn.widgets.Select(name='Select Program', options=unique_programs, value=default_program)
# Initialize priority widgets
filtered_priorities = filter_priorities(default_program)
priority_widget = pn.widgets.Select(name='Select Priority', options=filtered_priorities, value=default_priority)
# Initialize indice widgets
filtered_indices = filter_indices(default_priority)
indice_widget = pn.widgets.Select(name='Select Pty Indice', options=filtered_indices, value=default_indice)
######################################################################################
# Widgets callback functions
########################################################################################
# Callback function to update priority_widget and indice_widget when the program changes
def update_program(event):
selected_program = program_widget.value
# Update priorities based on selected program
priority_widget.options = filter_priorities(selected_program)
# Ensure the selected priority is valid
if priority_widget.value not in priority_widget.options:
priority_widget.value = priority_widget.options[0] if priority_widget.options else None
# Update indices based on the updated priority
update_priorities(event)
# Function to update priorities based on program selection
def update_priorities(event):
selected_program = program_widget.value
updated_priorities = df_Priority[df_Priority['Program'] == selected_program]['Priority'].unique().tolist()
# Update priorities widget options
priority_widget.options = updated_priorities
# Ensure the selected priority is valid
if priority_widget.value not in updated_priorities:
priority_widget.value = updated_priorities[0] if updated_priorities else None
# Update indices based on the updated priority
update_indices(event)
# Function to update indices based on priority selection
def update_indices(event):
selected_priority = priority_widget.value
indice_widget.options = filter_indices(selected_priority)
# Set to default value if it's valid; otherwise, choose the first available
if default_indice in indice_widget.options:
indice_widget.value = default_indice
else:
indice_widget.value = indice_widget.options[0] if indice_widget.options else None
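# (Assumed wiring) These callbacks are expected to be attached to the widgets elsewhere in the app,
# e.g. program_widget.param.watch(update_program, 'value') and
# priority_widget.param.watch(update_indices, 'value'), mirroring the watchers set up for the
# ADCN table further below.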
##############################################################################################################################
# --->>>> ENGINEERING <<<---
##############################################################################################################################
# --> Pending Report
##############################################################################################################################
#Get date from the Pending Report - ['Last Update'] in [T2]
date_pendingreport = df_PendingReport['Last Update'].iloc[0] # Get the first date in the 'Last Update' column
# Fill NaN values with empty strings
df_PendingReport['Comment'].fillna('', inplace=True)
# Define the initial empty DataFrame for changes_table
initial_changes_df = pd.DataFrame(columns=['Pty Indice', 'IDD Top Level', 'SEDA Top Level', 'Item Number', 'Action Needed', 'Rel Date', 'Comment'])
# Define column widths dictionary
column_widths = {
'Pty Indice': 60
}
# Create a Tabulator widget for sales_table
changes_table = pn.widgets.Tabulator(
initial_changes_df,
layout='fit_data_table', # Adjust columns to fit data (excluding header)
sizing_mode='stretch_width',
show_index=False, # This hides the index column
widths=column_widths # Set column widths
)
# Create a Markdown pane for messages in the Pending Report
pending_message_pane = pn.pane.Markdown("")
def update_changes_table(event):
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
# Filter df_PendingReport based on selected_program
if selected_program == 'All':
filtered_df_PendingReport = df_PendingReport.copy()
else:
filtered_df_PendingReport = df_PendingReport[df_PendingReport['Program'] == selected_program]
# Apply additional filters
if selected_priority != 'All':
filtered_df_PendingReport = filtered_df_PendingReport[filtered_df_PendingReport['Priority'] == selected_priority]
if selected_indice != 'All':
filtered_df_PendingReport = filtered_df_PendingReport[filtered_df_PendingReport['Pty Indice'] == selected_indice]
# Check if the filtered DataFrame is empty
if filtered_df_PendingReport.empty:
pending_message_pane.object = "**No open changes related to this PN**" # Simple message
changes_table.visible = False # Hide the changes table
else:
changes_table.value = filtered_df_PendingReport[['Pty Indice', 'IDD Top Level', 'SEDA Top Level', 'Item Number', 'Action Needed', 'Rel Date', 'Comment']]
pending_message_pane.object = "" # Clear the message
changes_table.visible = True # Show the changes table
'''
def update_changes_table(event):
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
# Filter df_PendingReport based on selected_program
if selected_program == 'All':
filtered_df_PendingReport = df_PendingReport.copy() # Use a copy of the entire DataFrame
else:
filtered_df_PendingReport = df_PendingReport[df_PendingReport['Program'] == selected_program]
# Apply additional filters based on selected_priority and selected_indice
if selected_priority != 'All':
filtered_df_PendingReport = filtered_df_PendingReport[filtered_df_PendingReport['Priority'] == selected_priority]
if selected_indice != 'All':
filtered_df_PendingReport = filtered_df_PendingReport[filtered_df_PendingReport['Pty Indice'] == selected_indice]
# Check if the filtered DataFrame is empty
if filtered_df_PendingReport.empty:
# Determine the appropriate message
if selected_priority == 'All' and selected_indice == 'All':
message_pane.object = 'No open changes available'
elif selected_priority == 'All':
message_pane.object = f"**No open changes for {selected_indice}**"
elif selected_indice == 'All':
message_pane.object = f"**No open changes for Priority {selected_priority}**"
else:
message_pane.object = f"**No open changes for Priority {selected_priority} and {selected_indice}**"
changes_table.visible = False # Hide the changes table
else:
changes_table.value = filtered_df_PendingReport[['Pty Indice', 'IDD Top Level', 'SEDA Top Level', 'Item Number', 'Action Needed', 'Rel Date', 'Comment']]
message_pane.object = "" # Clear the message
changes_table.visible = True # Show the changes table
'''
# Initialize the changes table with default values
update_changes_table(None)
### --> WIP 10/08 <---
##############################################################################################################################
# --> ADCN Report
##############################################################################################################################
# Get date from the ADCN Report - 'Last Update' is column [S] with the date in S2
date_adcngreport = df_ADCNReport['Last Update'].iloc[0] # Get the first date in the 'Last Update' column
# Reformat 'Pty Indice': strip the '[' and ']' and split the remaining values on ';'
df_ADCNReport['Pty Indice'] = df_ADCNReport['Pty Indice'].str.strip('[]').str.split(';')
# Explode the DataFrame to create a new row for each index
df_exploded = df_ADCNReport.explode('Pty Indice')
# Mapping Priority based on 'Pty Indice'
df_exploded['Priority'] = df_exploded['Pty Indice'].map(indice_to_priority)
# Reset the index if needed
df_exploded.reset_index(drop=True, inplace=True)
# Display the final DataFrame
#print('df_ADCNReport after transformation:')
#display(df_exploded[['Pty Indice', 'ADCN#', 'ESR#', 'Created', 'Release Date', 'Drawing Number', 'Status', 'ADCN Rev', 'Change Description', 'Priority', 'Program']])
# Update df_ADCNReport with the exploded DataFrame
df_ADCNReport = df_exploded
# Keep a pandas DataFrame with the columns 'Pty Indice', 'ADCN#', 'ESR#', 'Created', 'Release Date', 'Drawing Number', 'Status', 'ADCN Rev', 'Change Description', 'Priority', 'Program'
df_ADCNReport = df_ADCNReport[['Pty Indice', 'ADCN#', 'ESR#', 'Created', 'Release Date', 'Drawing Number', 'Status', 'ADCN Rev', 'Change Description', 'Priority', 'Program']]
# Replace NaN in 'Status', 'ADCN Rev' and 'Change Description' with empty strings
df_ADCNReport['Status'].fillna('', inplace=True) # Replace NaN with empty string for 'Status'
df_ADCNReport['ADCN Rev'].fillna('', inplace=True) # Replace NaN with empty string for 'ADCN Rev'
df_ADCNReport['Change Description'].fillna('', inplace=True) # Replace NaN with empty string for 'Change Description'
# Convert 'Created' and 'Release Date' to datetime format, stripping time, and formatting as MM/DD/YYYY
df_ADCNReport['Created'] = pd.to_datetime(df_ADCNReport['Created'], errors='coerce')
df_ADCNReport['Release Date'] = pd.to_datetime(df_ADCNReport['Release Date'], errors='coerce')
# Format dates to MM/DD/YYYY
df_ADCNReport['Created'] = df_ADCNReport['Created'].dt.strftime('%m/%d/%Y')
df_ADCNReport['Release Date'] = df_ADCNReport['Release Date'].dt.strftime('%m/%d/%Y')
# Write 'ADCN not created' where 'Created' is empty
df_ADCNReport['Created'].replace('', 'ADCN not created', inplace=True)
df_ADCNReport['Created'].replace(pd.NaT, 'ADCN not created', inplace=True)
# Replace NaN in 'Release Date' with 'Not released'
df_ADCNReport['Release Date'].replace(pd.NaT, 'ADCN not released', inplace=True)
#Display dataframe
#print('df_ADCNReport')
#display(df_ADCNReport)
# Function to apply font color formatting based on 'Status'
def font_color_status(val):
if val == 'ADCN not created':
return 'color: red;' # Return red font color for "ADCN not created"
else:
return 'color: black;' # Default to black font color
# Function to apply font color formatting based on 'Release Date'
def font_color_release_date(val):
if val == 'ADCN not released':
return 'color: red;' # Return red font color for "Not released"
else:
return 'color: black;' # Default to black font color
# Initialize the DataFrame pane and message pane
#ADCN_pane = pn.pane.DataFrame(pd.DataFrame(), sizing_mode='stretch_width')
ADCN_pane = pn.pane.DataFrame(pd.DataFrame(),
sizing_mode='stretch_width', # Keep it responsive
height=600) # Set max height
adcn_message_pane = pn.pane.Markdown("", sizing_mode='stretch_width')
# Update function to handle table updates with color formatting
def update_ADCN_table(event):
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
# Filter the DataFrame based on selected values
mask = pd.Series(True, index=df_ADCNReport.index)
if selected_priority != 'All':
mask &= (df_ADCNReport['Priority'] == selected_priority)
if selected_program != 'All':
mask &= (df_ADCNReport['Program'] == selected_program)
if selected_indice != 'All':
mask &= df_ADCNReport['Pty Indice'].str.contains(selected_indice)
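# Note: str.contains performs a substring/regex match, so a short indice (e.g. 'P5') would also
# match longer ones (e.g. 'P5A'); an exact comparison (== selected_indice) may be the safer filter
# if indices can be prefixes of each other.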
# Apply the mask to filter the DataFrame
filtered_df = df_ADCNReport[mask]
# Check if the filtered DataFrame is empty
if filtered_df.empty:
ADCN_pane.object = pd.DataFrame() # Clear the DataFrame pane
ADCN_pane.visible = False # Hide the DataFrame pane
adcn_message_pane.object = '**No ADCN related to this PN**' # Show no data message
else:
# Select relevant columns to display and hide 'Priority' and 'Program'
displayed_df = filtered_df[['Pty Indice', 'ADCN#', 'ESR#', 'Created', 'Release Date',
'Drawing Number', 'Status', 'ADCN Rev', 'Change Description']]
# Apply color formatting using 'applymap' for the 'Status' column
#styled_df = displayed_df.style.applymap(font_color_status, subset=['Status'])
# Apply color formatting using 'applymap' for the 'Status' and 'Release Date' columns
styled_df = displayed_df.style.applymap(font_color_status, subset=['Created']) \
.applymap(font_color_release_date, subset=['Release Date'])
# Update the ADCN_pane with the styled DataFrame
ADCN_pane.object = styled_df.hide(axis='index')
ADCN_pane.visible = True # Show the DataFrame pane
adcn_message_pane.object = "" # Clear the message
# Call the update function initially to populate the table
update_ADCN_table(None)
# Attach the update function to widget value changes
program_widget.param.watch(update_ADCN_table, 'value')
priority_widget.param.watch(update_ADCN_table, 'value')
indice_widget.param.watch(update_ADCN_table, 'value')
###############################################################################################
# Initial call to update_widgets_and_table to populate the table based on default selections
#############################################################################################
# Define supply dashboard
changes_dashboard = pn.Column(
pn.pane.HTML(f"""
<div style="text-align: left;">
<style>
h2 {{ margin-bottom: 0; color: #305496; }} /* Set title color here */
p {{ margin-top: 0; }}
</style>
<h2>Engineering</h2>
<p>{f"|PendingReport| - <b>{date_pendingreport}</b>: IDD's internal changes based on Agile (PLM) | Pending Report from Change Analyst | [weekly update]"}</p>
</div>
"""),
pending_message_pane,
changes_table,
pn.Spacer(height=20),
pn.pane.HTML(f"""
<div style="text-align: left;">
<style>
p {{ margin-top: 0; }}
</style>
<p>{f"|ADCN Report| - <b>{date_adcngreport}</b>: IDD's external changes based on SEDA's ADCN Report | Since beginning of the current year | [weekly update]"}</p>
</div>
"""),
adcn_message_pane, # Add ADCN message pane here
ADCN_pane,
sizing_mode='stretch_width' # Adjust sizing mode
)
##############################################################################################################################
# --->>>> SALES <<<--- updated 08/22
##############################################################################################################################
# Backlog
#######################################################
# Define the initial empty DataFrame for sales_table
#initial_sales_df = pd.DataFrame(columns=['Pty Indice', 'IDD Top Level', 'SEDA Top Level', 'Qty','Rem. Qty', 'Order', 'Due Date'])
initial_sales_df = pd.DataFrame(columns=['Pty Indice', 'IDD Top Level', 'SEDA Top Level', 'Backlog Qty', 'Rem. Qty', 'Order', 'Due Date'])
# Define column widths dictionary
column_widths = {
'Pty Indice': 60,
#'Qty': 60,
'Backlog Qty': 60,
'Rem. Qty': 80,
'Order': 80
}
# Create a Tabulator widget for sales_table
sales_table = pn.widgets.Tabulator(
initial_sales_df,
layout='fit_data_table', # Adjust columns to fit data (excluding header)
sizing_mode='stretch_both',
show_index=False, # This hides the index column
widths=column_widths # Set column widths
)
# Create a Markdown pane for messages in the Sales table
message_pane = pn.pane.Markdown("")
def update_sales_table(event):
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
# Filter df_Backlog based on selected_program
if selected_program == 'All':
filtered_df_backlog = df_Backlog.copy() # Use a copy of the entire DataFrame
else:
filtered_df_backlog = df_Backlog[df_Backlog['Program'] == selected_program]
# Apply additional filters based on selected_priority and selected_indice
if selected_priority != 'All':
filtered_df_backlog = filtered_df_backlog[filtered_df_backlog['Priority'] == selected_priority]
if selected_indice != 'All':
filtered_df_backlog = filtered_df_backlog[filtered_df_backlog['Pty Indice'] == selected_indice]
# Group by 'Order' and aggregate the 'Qty' (sum) and 'Rem. Qty' (first)
aggregated_df = filtered_df_backlog.groupby('Order').agg({
'Pty Indice': 'first',
'IDD Top Level': 'first',
'SEDA Top Level': 'first',
#'Qty': 'sum',
'Backlog Qty': 'sum',
'Rem. Qty': 'first', # Use the first non-null value
'Due Date': 'first',
}).reset_index()
# Check if the aggregated DataFrame is empty
if aggregated_df.empty:
sales_table.visible = False # Hide the sales table
message_pane.object = 'No data available' # Display a message indicating no data
else:
sales_table.value = aggregated_df[['Pty Indice', 'IDD Top Level', 'SEDA Top Level', 'Backlog Qty', 'Rem. Qty', 'Order', 'Due Date']]
sales_table.visible = True # Show the sales table
message_pane.object = "" # Clear the message
# Initialize the sales table with default values
update_sales_table(None)
# Define color for important text
important_text_color = '#002060' # dark blue
########################################################
# Create a pane for displaying dynamic text for sales
########################################################
sales_summary = pn.pane.Str(sizing_mode='stretch_width')
def update_sales_summary(event):
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
# Initialize a boolean mask with all True values
mask = pd.Series(True, index=df_Backlog.index)
# Apply filters based on selections
if selected_program != 'All':
mask &= (df_Backlog['Program'] == selected_program)
if selected_priority != 'All':
mask &= (df_Backlog['Priority'] == selected_priority)
if selected_indice != 'All':
mask &= (df_Backlog['Pty Indice'] == selected_indice)
# Filter df_Backlog using the constructed mask
filtered_df_backlog = df_Backlog[mask]
# Check if filtered_df_backlog is empty or not
if filtered_df_backlog.empty:
sales_summary.object = 'No data available'
else:
# Initialize a dictionary to sum quantities for each 'Pty Indice'
summary_dict = {}
# Iterate over each row in filtered_df_backlog
for idx, row in filtered_df_backlog.iterrows():
pty_indice = row['Pty Indice']
order = row['Order']
#qty = row['Qty'] 08/22
qty = row['Backlog Qty']
if pty_indice not in summary_dict:
summary_dict[pty_indice] = {
'total_qty': 0,
'orders': set(),
'shipped': 0,
'critical_qty': 0
} # Initialize orders as a set
summary_dict[pty_indice]['total_qty'] += qty
summary_dict[pty_indice]['orders'].add(order) # Use set to avoid duplicates
# Retrieve shipment data from df_Priority
shipment_data = df_Priority[df_Priority['Pty Indice'].isin(summary_dict.keys())]
# Get details for the current 'Pty Indice'
details = filtered_df_backlog[filtered_df_backlog['Pty Indice'] == pty_indice].iloc[0]
for pty_indice in summary_dict.keys():
# Fetch the shipment data for the current 'Pty Indice'
shipment_info = shipment_data[shipment_data['Pty Indice'] == pty_indice]
# Calculate shipped quantities
shipped = shipment_info['Shipped'].sum() if not shipment_info['Shipped'].isna().all() else 0
#Convert 'shipped' to int - 08/14
shipped = int(shipped)
# Handle and convert 'Critical Qty'
critical_qty = shipment_info['Critical Qty']
if not critical_qty.isna().all():
try:
critical_qty = critical_qty.astype(int).sum()
except ValueError:
critical_qty = 0
else:
critical_qty = 0
# Handle non-numeric values for 'Critical Qty'
if isinstance(critical_qty, str) and critical_qty.strip().lower() == 'completed':
shipment = "Critical quantity fulfilled"
else:
shipment = f"Total quantity (<b>{shipped}</b>) shipped over (<b>{critical_qty}</b>) total critical quantity"
# Add shipment information to the summary
summary_dict[pty_indice]['shipment'] = shipment
# Format the output
lines = []
for pty_indice, data in summary_dict.items():
# Look up the details row for this specific 'Pty Indice' so each line shows its own Top-Level info
details = filtered_df_backlog[filtered_df_backlog['Pty Indice'] == pty_indice].iloc[0]
orders_concat = ', '.join(data['orders']) # Join the set of orders for display
shipment_info = data.get('shipment', 'No shipment information available')
line = (
#f"<u>Pty Indice</u>: <b>{pty_indice}</b> - <b>{details['IDD Top Level']}</b> ({details['SEDA Top Level']})<br>"
f"<u>Pty Indice</u>: <span style='color:{important_text_color};'><b>{pty_indice}</b> - <b>{details['IDD Top Level']}</b></span> ({details['SEDA Top Level']})<br>"
f"▷ IDD Backlog for {pty_indice} is <b>{data['total_qty']}</b> Top-Level within the following SO: {orders_concat}<br>"
f"▷ {shipment_info}<br>"
)
lines.append(line)
# Join all lines into a single string
display_text = '\n'.join(lines)
sales_summary.object = display_text
# Define an initial call to populate the table when the app starts
update_sales_summary(None)
###############################################################
# Turnover Report
###############################################################
# Convert 'Tracking#' to string
df_TurnoverReport['Tracking#'] = df_TurnoverReport['Tracking#'].astype(str)
# Replace 'nan' with an empty string
df_TurnoverReport['Tracking#'] = df_TurnoverReport['Tracking#'].replace('nan', '')
# Remove '.0' from the string values
df_TurnoverReport['Tracking#'] = df_TurnoverReport['Tracking#'].str.replace('.0', '', regex=False)
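# Note: this removes any '.0' substring, not just a trailing one; a stricter cleanup would be
# str.replace(r'\.0$', '', regex=True) if tracking numbers can legitimately contain '.0'.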
#Define the initial empty DataFrame for turnover_table
initial_turnover_df = pd.DataFrame(columns=['Pty Indice', 'IDD Top Level', 'SEDA Top Level', 'Qty', 'Invoice date', 'Order', 'Tracking#'])
# Ensure 'Invoice date' is in datetime format
df_TurnoverReport['Invoice date'] = pd.to_datetime(df_TurnoverReport['Invoice date'])
# Format 'Invoice date' to short date format
df_TurnoverReport['Invoice date'] = df_TurnoverReport['Invoice date'].dt.strftime('%m/%d/%Y')
# Define column widths dictionary
column_widths = {
'Pty Indice': 60,
'Qty': 60,
'Rem. Qty': 80,
'Order': 80
}
###############################################
# Create a Tabulator widget for turnover_table
################################################
turnover_table = pn.widgets.Tabulator(
initial_turnover_df,
layout='fit_data_table',
sizing_mode='stretch_both',
show_index=False, # This hides the index column
widths=column_widths
)
# Define the turnover_message_pane
turnover_message_pane = pn.pane.HTML('')
def update_turnover_table(event):
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
# Filter df_TurnoverReport based on selected_program
if selected_program == 'All':
filtered_df_TurnoverReport = df_TurnoverReport.copy() # Make a copy of the entire DataFrame
else:
filtered_df_TurnoverReport = df_TurnoverReport[df_TurnoverReport['Program'] == selected_program]
# Apply additional filters based on selected_priority and selected_indice
if selected_priority != 'All':
filtered_df_TurnoverReport = filtered_df_TurnoverReport[filtered_df_TurnoverReport['Priority'] == selected_priority]
if selected_indice != 'All':
filtered_df_TurnoverReport = filtered_df_TurnoverReport[filtered_df_TurnoverReport['Pty Indice'] == selected_indice]
# Check if the filtered DataFrame is empty
if filtered_df_TurnoverReport.empty:
if selected_indice == 'All':
turnover_message_pane.object = f"**No shippment or NC received within the period for Priority {selected_priority}**"
else:
turnover_message_pane.object = f"**No shippment or NC received within the period for {selected_indice}**"
turnover_table.visible = False # Hide the turnover table
else:
turnover_table.value = filtered_df_TurnoverReport[['Pty Indice', 'IDD Top Level', 'SEDA Top Level', 'Qty', 'Invoice date', 'Order', 'Tracking#']]
turnover_message_pane.object = "" # Clear the message
turnover_table.visible = True # Show the turnover table
# Initialize the turnover table with default values
update_turnover_table(None)
###############################################
# Create a Markdown pane for turnover summary
################################################
turnover_summary = pn.pane.Str(sizing_mode='stretch_width')
# Convert 'Invoice date' to datetime format
df_TurnoverReport['Invoice date'] = pd.to_datetime(df_TurnoverReport['Invoice date'])
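# Note: 'Invoice date' was formatted to MM/DD/YYYY strings above for the table display; converting
# the column back to datetime here means later refreshes of turnover_table will show full
# timestamps unless the column is re-formatted (or the min/max below are computed on a copy).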
# Calculate the period spanned by the Turnover Report (span_TurnoverReport)
start_date = df_TurnoverReport['Invoice date'].min()
end_date = df_TurnoverReport['Invoice date'].max()
span_TurnoverReport = f"{start_date.date()} to {end_date.date()}" # Format dates as needed
def update_turnover_summary(event):
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
# Filter df_TurnoverReport based on selected_program
if selected_program == 'All':
filtered_df_TurnoverReport = df_TurnoverReport.copy() # Make a copy of the entire DataFrame
else:
filtered_df_TurnoverReport = df_TurnoverReport[df_TurnoverReport['Program'] == selected_program]
# Apply additional filters based on selected_priority and selected_indice
if selected_priority != 'All':
filtered_df_TurnoverReport = filtered_df_TurnoverReport[filtered_df_TurnoverReport['Priority'] == selected_priority]
if selected_indice != 'All':
filtered_df_TurnoverReport = filtered_df_TurnoverReport[filtered_df_TurnoverReport['Pty Indice'] == selected_indice]
# Check if filtered_df_TurnoverReport is empty or not
if filtered_df_TurnoverReport.empty:
turnover_summary.object = 'No data available'
else:
# Initialize a dictionary to sum quantities for each 'Pty Indice'
summary_dict = {}
# Iterate over each row in filtered_df_TurnoverReport
for idx, row in filtered_df_TurnoverReport.iterrows():
pty_indice = row['Pty Indice']
order = row['Order']
qty = row['Qty']
if pty_indice not in summary_dict:
summary_dict[pty_indice] = {
'total_qty': 0,
'top_level_shipped': 0,
'nc_shipped': 0,
'nc_received': 0,
'orders': set(),
'details': filtered_df_TurnoverReport[filtered_df_TurnoverReport['Pty Indice'] == pty_indice].iloc[0]
} # Initialize orders as a set
summary_dict[pty_indice]['total_qty'] += qty
summary_dict[pty_indice]['orders'].add(order) # Use set to avoid duplicates
# Categorize the quantity
if qty > 0:
if 'NC' in order:
summary_dict[pty_indice]['nc_shipped'] += qty
else:
summary_dict[pty_indice]['top_level_shipped'] += qty
elif qty < 0 and 'NC' in order:
summary_dict[pty_indice]['nc_received'] -= qty # Use '-' to keep positive values for display
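# Worked example of the categorization above (hypothetical rows, for illustration only):
#   Qty +5 on order 'SO12345'  -> top_level_shipped += 5
#   Qty +2 on order 'NC00123'  -> nc_shipped += 2   ('NC' appears in the order number)
#   Qty -1 on order 'NC00123'  -> nc_received += 1  (negated so the displayed value stays positive)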
# Format the output
lines = []
for pty_indice, data in summary_dict.items():
details = data['details']
orders_concat = ', '.join(data['orders'])
line = (
#f"<u>Pty Indice</u>: <b>{pty_indice}</b> - <b>{details['IDD Top Level']}</b> ({details['SEDA Top Level']}) - Repport's span (<b>{span_TurnoverReport}</b>)<br>"
f"<u>Pty Indice</u>: <span style='color:{important_text_color};'><b>{pty_indice}</b> - <b>{details['IDD Top Level']}</b></span> ({details['SEDA Top Level']}) - Repport's span (<b>{span_TurnoverReport}</b>)<br>"
f"▷ Qty Top-Level shipped on the period: <b>{data['top_level_shipped']}</b><br>"
f"▷ Qty NC shipped on the period: <b>{data['nc_shipped']}</b><br>"
f"▷ Qty NC received on the period: <b>{data['nc_received']}</b><br>"
)
lines.append(line)
# Join all lines into a single string
display_text = '<br>'.join(lines)
turnover_summary.object = display_text
# Define an initial call to populate the table when the app starts
update_turnover_summary(None)
###############################################################################################
# Initial call to update_widgets_and_table to populate the table based on default selections
#############################################################################################
sales_dashboard = pn.Column(
pn.pane.HTML(f"""
<div style='text-align: left;'>
<style>
h2 {{ margin-bottom: 0; color: #305496; }} /* Set title color here */
p {{ margin-top: 0; }}
</style>
<h2>Backlog & recent shipment</h2>
<p>{f"|CM-Backlog| - <b>{file_date}</b>: IDD's backlog based on QAD (ERP) | [Daily update]"}</p>
</div>
"""),
pn.Row(sales_summary, sizing_mode='stretch_both'), # Row to stretch content
sales_table,
pn.pane.HTML(f"""
<div style='text-align: left;'>
<p>{f"|CM-Turnover Report| - <b>{file_date}</b>: Top-Level shipped/Received at IDD based on QAD (ERP) | [Daily update, limited span (starts the 1st of current month)]"}</p>
</div>
"""),
turnover_message_pane, # Add the turnover message pane here
turnover_summary,
turnover_table,
sizing_mode='stretch_width', # Adjust sizing mode
height=600 # Set a fixed height to enforce the maximum height
)
##############################################################################################################################
# --->>>> SUPPLY CHAIN <<<---
##############################################################################################################################
# Filter out rows where 'Qty On Hand' is NaN
df_Summary = df_Summary[df_Summary['Qty On Hand'].notna()]
# Round 'Qty On Hand' and 'Qty (GS/BOM)' to integers
df_Summary['Qty On Hand'] = df_Summary['Qty On Hand'].round().astype(int)
#df_Summary['Qty (GS/BOM)'] = df_Summary['Qty (GS/BOM)'].round().astype(int) # saved 02/03
### Update 02/03 #################
# Convert to numeric type first (invalid values become NaN)
df_Summary['Qty (GS/BOM)'] = pd.to_numeric(df_Summary['Qty (GS/BOM)'], errors='coerce')
# Handle missing values (fill with 0 or appropriate value)
df_Summary['Qty (GS/BOM)'] = df_Summary['Qty (GS/BOM)'].fillna(0)
# Now perform rounding and conversion to integers
df_Summary['Qty (GS/BOM)'] = df_Summary['Qty (GS/BOM)'].round().astype(int)
####################################
#Display selected_indice, with related 'IDD Top Level', 'SEDA Top Level', 'Top-Level Status' and 'Max Qty Top-Level' from df_Summary above the table
#Apply mapping to create 'Program' column in df_Summary
df_Summary['Program'] = df_Summary['Pty Indice'].map(indice_to_program)
# Fill NaN values with empty strings
df_Summary['Top Level sharing Components'] = df_Summary['Top Level sharing Components'].fillna('')
df_Summary['Comment'] = df_Summary['Comment'].fillna('')
# Replace 'SAFRAN ELEC & DEFENSE(S9412)' with 'Safran EDA' in the 'Supplier' column
df_Summary['Supplier'] = df_Summary['Supplier'].replace('SAFRAN ELEC & DEFENSE(S9412)', 'Safran EDA')
# 09/20 update
# Define a list of acronyms to preserve in uppercase
acronyms = ['EDA', 'PCB', 'PWB', 'CPA', 'CPSL', 'ISP', 'TBD'] # Add more acronyms as needed
# Function to capitalize while preserving acronyms
def title_with_acronyms(text, acronyms):
# Convert the text to title case (first letter capitalized, rest lowercase)
text = text.lower().title()
# Use regex to replace the acronyms in uppercase
for acronym in acronyms:
text = re.sub(rf'\b{acronym.title()}\b', acronym, text)
return text
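# Illustrative usage of title_with_acronyms (hypothetical input string):
#   title_with_acronyms('SAFRAN EDA PCB ASSEMBLY', acronyms) -> 'Safran EDA PCB Assembly'
# The .title() call lowercases the acronyms first, and the regex pass restores them to uppercase.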
# Apply the function to the 'Supplier' and 'Description' columns
df_Summary['Supplier'] = df_Summary['Supplier'].astype(str) # Added 02/03
df_Summary['Supplier'] = df_Summary['Supplier'].apply(lambda x: title_with_acronyms(x, acronyms))
df_Summary['Description'] = df_Summary['Description'].astype(str) # Added 02/03
df_Summary['Description'] = df_Summary['Description'].apply(lambda x: title_with_acronyms(x, acronyms))
'''
# Define the initial empty DataFrame for supply_table
initial_df = pd.DataFrame(columns=['Pty Indice', 'IDD Component', 'Level', 'Description', 'Qty (GS/BOM)', 'Supplier', 'Top Level sharing Components', 'Comment','Qty On Hand','Rem. Qty'])
# Define column widths dictionary
column_widths = {
'Pty Indice': 60,
'Qty On Hand': 120,
'Rem. Qty': 80,
'Qty (GS/BOM) ': 120,
}
'''
#New 09/03
##################################################################
# Create a supply_table with Panel for Purchased architecture
##################################################################
# Color formatting of the 'Level' column (shared by supply_table and wip_table)
###########################################
# Color of 'Level'
#Level == 0: '63BE7B' # Green
#Level == 1: 'A2C075' # Lighter Green
#Level == 2: 'FFEB84' # Yellow
#Level == 3: 'FFD166' # Orange
#Level == 4: 'F88E5B' # Darker Orange
#Level == 5: 'F8696B' # Red
#Level == 6: '8B0000' # Darker Red
# Define color mapping for 'Level'
color_mapping_Level = {
0: '#63BE7B',
1: '#A2C075',
2: '#FFEB84',
3: '#FFD166',
4: '#F88E5B',
5: '#F8696B',
6: '#8B0000'
}
def apply_color_formatting(df):
# Create a DataFrame for styles, initializing with empty strings
styles = pd.DataFrame('', index=df.index, columns=df.columns)
# Fill missing values in 'Level' with a default value or handle them separately
if 'Level' in df.columns:
df['Level'] = df['Level'].fillna(-1) # Using -1 or any other default value that does not conflict with valid levels
for idx, value in df['Level'].items():
if pd.isna(value) or value not in color_mapping_Level:
styles.at[idx, 'Level'] = 'background-color: #FFFFFF' # Default color for missing values
else:
styles.at[idx, 'Level'] = f'background-color: {color_mapping_Level[value]}'
# Apply font and fill styling for 'Qty (GS/BOM)' column - New 09/18
# Apply font, fill, and border styling for 'Qty (GS/BOM)' column
if 'Qty (GS/BOM)' in df.columns:
for idx, value in df['Qty (GS/BOM)'].items():
if value == 0:
styles.at[idx, 'Qty (GS/BOM)'] = (
'color: #C00000; '
'background-color: #FFC7CE; '
'border: 1px dashed #C00000'
)
return styles
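# Sketch of how the styles DataFrame returned above is consumed (this mirrors the calls made
# further below when the supply/WIP tables are refreshed):
#   styles = apply_color_formatting(some_df)
#   styled = some_df.style.apply(lambda r: styles.loc[r.name], axis=1).hide(axis='index')
# Styler.apply with axis=1 receives each row and expects one CSS string per column,
# which is exactly the shape of the 'styles' frame built here.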
# Function to apply color formatting to 'Level' column
def color_levels(val):
color = color_mapping_Level.get(val, '#FFFFFF') # Default to white if no mapping exists
return f'background-color: {color}'
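# Example (illustration only):
#   color_levels(2)  -> 'background-color: #FFEB84'   (yellow)
#   color_levels(99) -> 'background-color: #FFFFFF'   (no mapping, falls back to white)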
# Update function to handle table updates with color formatting
def update_supply_table(event):
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
# Filter df_Summary based on selected_program
if selected_program == 'All':
filtered_df_summary = df_Summary.copy() # Make a copy of the entire DataFrame
else:
filtered_df_summary = df_Summary[df_Summary['Program'] == selected_program]
# Apply additional filters based on selected_priority and selected_indice
if selected_priority != 'All':
filtered_df_summary = filtered_df_summary[filtered_df_summary['Priority'] == selected_priority]
if selected_indice != 'All':
filtered_df_summary = filtered_df_summary[filtered_df_summary['Pty Indice'] == selected_indice]
# Filter out rows where 'Supplier' == 'Make Part'
filtered_df_summary = filtered_df_summary[filtered_df_summary['Supplier'] != 'Make Part']
# Convert 'Level' and 'Rem. Qty' to integer
filtered_df_summary['Level'] = pd.to_numeric(filtered_df_summary['Level'], errors='coerce').fillna(-1).astype(int)
filtered_df_summary['Rem. Qty'] = pd.to_numeric(filtered_df_summary['Rem. Qty'], errors='coerce').fillna(0).astype(int)
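# Coercion sketch (hypothetical values): pd.to_numeric(pd.Series(['3', 'N/A']), errors='coerce')
# gives [3.0, NaN]; .fillna(-1).astype(int) then yields [3, -1], so malformed 'Level' entries
# end up as the -1 sentinel that the color mapping treats as "no color".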
# Filter out rows where 'Qty (GS/BOM)' > 'Rem. Qty'
filtered_df_summary = filtered_df_summary[filtered_df_summary['Qty (GS/BOM)'] <= filtered_df_summary['Rem. Qty']]
# Sort by 'Pty Indice' and 'BOM Index'
#filtered_df_summary = filtered_df_summary.sort_values(by=['Pty Indice', 'BOM Index']) #saved 02/03
filtered_df_summary = filtered_df_summary.sort_values(by=['Pty Indice', 'BOM_Index'])
# Check if the filtered DataFrame is empty
if filtered_df_summary.empty:
supply_table.object = pd.DataFrame({
'Pty Indice': ['No Data'],
'IDD Component': [''],
'Level': [''],
'Description': [''],
'Qty (GS/BOM)': [''],
'Supplier': [''],
'Top Level sharing Components': [''],
'Comment': [''],
'Qty On Hand': [''],
'Rem. Qty': ['']
})
message_pane.object = 'No data available'
else:
supply_table_df = filtered_df_summary[['Pty Indice', 'IDD Component', 'Level', 'Description', 'Qty (GS/BOM)', 'Supplier', 'Top Level sharing Components', 'Comment', 'Qty On Hand', 'Rem. Qty']]
# Apply color formatting using 'applymap' for the 'Level' column
styled_df = supply_table_df.style.applymap(color_levels, subset=['Level'])
# Update the supply_table with the styled DataFrame
supply_table.object = styled_df.hide(axis='index')
message_pane.object = "" # Clear the message
# Initialize the supply_table pane
supply_table = pn.pane.DataFrame(pd.DataFrame(), sizing_mode='stretch_width')
message_pane = pn.pane.Markdown("", sizing_mode='stretch_width')
def on_widget_change_supply(event):
update_supply_table(event) # Simply call the update function
# Call the update initially to trigger the first load
update_supply_table(None)
# SAVED 09/27 to apply color mapping when widget is updated
''' SAVED 09/27 to apply color formatting when widget is updated
def apply_color_formatting(df):
# Create a DataFrame for styles, initializing with empty strings
styles = pd.DataFrame('', index=df.index, columns=df.columns)
# Fill missing values in 'Level' with a default value or handle them separately
if 'Level' in df.columns:
df['Level'] = df['Level'].fillna(-1) # Using -1 or any other default value that does not conflict with valid levels
for idx, value in df['Level'].items():
if pd.isna(value) or value not in color_mapping:
styles.at[idx, 'Level'] = 'background-color: #FFFFFF' # Default color for missing values
else:
styles.at[idx, 'Level'] = f'background-color: {color_mapping[value]}'
# Apply font and fill styling for 'Qty (GS/BOM)' column - New 09/18
# Apply font, fill, and border styling for 'Qty (GS/BOM)' column
if 'Qty (GS/BOM)' in df.columns:
for idx, value in df['Qty (GS/BOM)'].items():
if value == 0:
styles.at[idx, 'Qty (GS/BOM)'] = (
'color: #C00000; '
'background-color: #FFC7CE; '
'border: 1px dashed #C00000'
)
return styles
def update_supply_table(event):
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
# Filter df_Summary based on selected_program
if selected_program == 'All':
filtered_df_summary = df_Summary.copy() # Make a copy of the entire DataFrame
else:
filtered_df_summary = df_Summary[df_Summary['Program'] == selected_program]
# Apply additional filters based on selected_priority and selected_indice
if selected_priority != 'All':
filtered_df_summary = filtered_df_summary[filtered_df_summary['Priority'] == selected_priority]
if selected_indice != 'All':
filtered_df_summary = filtered_df_summary[filtered_df_summary['Pty Indice'] == selected_indice]
# Filter-out Supplier = 'Make Part' as they are not relevant for supply chain purposes
filtered_df_summary = filtered_df_summary[filtered_df_summary['Supplier'] != 'Make Part']
# Convert 'Level' and 'Rem. Qty' to integer
filtered_df_summary['Level'] = pd.to_numeric(filtered_df_summary['Level'], errors='coerce').fillna(-1).astype(int)
filtered_df_summary['Rem. Qty'] = pd.to_numeric(filtered_df_summary['Rem. Qty'], errors='coerce').fillna(0).astype(int)
# Filter out rows where 'Qty (GS/BOM)' > 'Remain. crit. Qty'
filtered_df_summary = filtered_df_summary[filtered_df_summary['Qty (GS/BOM)'] <= filtered_df_summary['Rem. Qty']]
# Sort by 'Pty Indice' and 'BOM Index'
filtered_df_summary = filtered_df_summary.sort_values(by=['Pty Indice', 'BOM Index'])
# Check if the filtered DataFrame is empty
if filtered_df_summary.empty:
supply_table.object = pd.DataFrame({
'Pty Indice': ['No Data'],
'IDD Component': [''],
'Level': [''],
'Description': [''],
'Qty (GS/BOM)': [''],
'Supplier': [''],
'Top Level sharing Components': [''],
'Comment': [''],
'Qty On Hand': [''],
'Rem. Qty': ['']
})
message_pane.object = 'No data available' # Display a message indicating no data
else:
supply_table_df = filtered_df_summary[['Pty Indice', 'IDD Component', 'Level', 'Description', 'Qty (GS/BOM)', 'Supplier', 'Top Level sharing Components', 'Comment', 'Qty On Hand', 'Rem. Qty']]
# Handle missing values in 'Level' column before styling
supply_table_df['Level'] = supply_table_df['Level'].fillna(-1)
# Apply color formatting to 'Level' column
styles = apply_color_formatting(supply_table_df)
# Apply color formatting to 'Level' column and 'Qty (GS/BOM)' column - New 09/18
styles = apply_color_formatting(supply_table_df)
# Update the supply_table with styled DataFrame
supply_table.object = supply_table_df.style.apply(lambda x: styles.loc[x.name], axis=1).hide(axis='index')
message_pane.object = "" # Clear the message
# Initialize the supply_table pane
supply_table = pn.pane.DataFrame(pd.DataFrame(), sizing_mode='stretch_width')
message_pane = pn.pane.Markdown("", sizing_mode='stretch_width')
def on_widget_change_supply(event):
update_supply_table(event)
# Refresh the layout if necessary
supply_dashboard[2] = supply_table # Update the table in the layout
# Initial call to populate the table based on default selections
update_supply_table(None)
'''
############## Update 09/16 ################
# Filters for update_supply_table_fullArchi
#########################################
# Define filtering widgets using HTML panes for labels
label_idd_component = pn.pane.HTML('<b style="color:#2B70B3;">IDD Component Filter</b>')
label_supplier = pn.pane.HTML('<b style="color:#2B70B3;">Supplier Filter</b>')
# Define filtering widgets
filters_fullArchi = {
'IDD Component': pn.widgets.TextInput(name=''),
'Supplier': pn.widgets.TextInput(name=''),
}
# Create a button to trigger filtering
filters_fullArchi_button = pn.widgets.Button(name='Apply Filters', button_type='primary')
reset_fullArchi_button = pn.widgets.Button(name='Reset Filters', button_type='default')
############## Update 09/16 ################
# filters_fullArchi for update_supply_table_fullArchi
#########################################
# Set default value to None for all filter widgets
for widget in filters_fullArchi.values():
widget.value = None
# Create the layout with labels and widgets
filter_widgets_fullArchi = pn.Row(
pn.Column(label_idd_component, filters_fullArchi['IDD Component']),
pn.Column(label_supplier, filters_fullArchi['Supplier']),
pn.Column(
pn.Spacer(height=25), # Spacer before the buttons
pn.Row(filters_fullArchi_button, reset_fullArchi_button)
)
)
# Initial call to populate the table based on default selections
#update_supply_table_fullArchi(None)
#########################################
# Update 09/16
########################################################################################
# Create a supply_table with Panel for full architecture Make and Purchased part
#######################################################################################
# Update WIP 09/27 to apply color mapping when widget is updated
def update_supply_table_fullArchi(event):
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
idd_component_filter = filters_fullArchi['IDD Component'].value
supplier_filter = filters_fullArchi['Supplier'].value
# Filter df_Summary based on selected_program
if selected_program == 'All':
filtered_df_summary = df_Summary.copy() # Make a copy of the entire DataFrame
else:
filtered_df_summary = df_Summary[df_Summary['Program'] == selected_program]
# Apply additional filters based on selected_priority and selected_indice
if selected_priority != 'All':
filtered_df_summary = filtered_df_summary[filtered_df_summary['Priority'] == selected_priority]
if selected_indice != 'All':
filtered_df_summary = filtered_df_summary[filtered_df_summary['Pty Indice'] == selected_indice]
# Apply filters from the new filter widgets for 'IDD Component' and 'Supplier'
if idd_component_filter: # Only apply if a filter value is provided
filtered_df_summary = filtered_df_summary[filtered_df_summary['IDD Component'].str.contains(idd_component_filter, case=False, na=False)]
if supplier_filter: # Only apply if a filter value is provided
filtered_df_summary = filtered_df_summary[filtered_df_summary['Supplier'].str.contains(supplier_filter, case=False, na=False)]
# Convert 'Level' and 'Rem. Qty' to integer, handling formatting issues
filtered_df_summary['Level'] = pd.to_numeric(filtered_df_summary['Level'], errors='coerce').fillna(-1).astype(int)
filtered_df_summary['Rem. Qty'] = pd.to_numeric(filtered_df_summary['Rem. Qty'], errors='coerce').fillna(0).astype(int)
# Print unique values in 'Level' to debug
#print("Unique values in 'Level' after conversion:", filtered_df_summary['Level'].unique())
# Ensure that -1 is in color_mapping_Level or handle it
if -1 not in color_mapping_Level:
color_mapping_Level[-1] = '#FFFFFF' # Default color for -1 (apply_color_formatting adds the 'background-color:' prefix)
# Filter out rows where 'Qty (GS/BOM)' > 'Rem. Qty'
filtered_df_summary = filtered_df_summary[filtered_df_summary['Qty (GS/BOM)'] <= filtered_df_summary['Rem. Qty']]
# Sort by 'Pty Indice' and 'BOM Index'
#filtered_df_summary = filtered_df_summary.sort_values(by=['Pty Indice', 'BOM Index']) # saved 02/03
filtered_df_summary = filtered_df_summary.sort_values(by=['Pty Indice', 'BOM_Index'])
# Check if the filtered DataFrame is empty
if filtered_df_summary.empty:
supply_table_fullArchi.object = pd.DataFrame({
'Pty Indice': ['No Data'],
'IDD Component': [''],
'Level': [''],
'Description': [''],
'Qty (GS/BOM)': [''],
'Supplier': [''],
'Top Level sharing Components': [''],
'Comment': [''],
'Qty On Hand': [''],
'Rem. Qty': ['']
})
message_pane_fullArchi.object = 'No data available' # Display a message indicating no data
else:
supply_table_df = filtered_df_summary[['Pty Indice', 'IDD Component', 'Level', 'Description', 'Qty (GS/BOM)', 'Supplier', 'Top Level sharing Components', 'Comment', 'Qty On Hand', 'Rem. Qty']]
# Apply color formatting to 'Level' column
styles = apply_color_formatting(supply_table_df)
# Update the supply_table with styled DataFrame
supply_table_fullArchi.object = supply_table_df.style.apply(lambda x: styles.loc[x.name], axis=1).hide(axis='index')
message_pane_fullArchi.object = "" # Clear the message
# SAVED 09/27 to apply color mapping when widget is updated
''' SAVED 09/27 to apply color formatting when widget is updated
def update_supply_table_fullArchi(event):
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
idd_component_filter = filters_fullArchi['IDD Component'].value
supplier_filter = filters_fullArchi['Supplier'].value
# Filter df_Summary based on selected_program
if selected_program == 'All':
filtered_df_summary = df_Summary.copy() # Make a copy of the entire DataFrame
else:
filtered_df_summary = df_Summary[df_Summary['Program'] == selected_program]
# Apply additional filters based on selected_priority and selected_indice
if selected_priority != 'All':
filtered_df_summary = filtered_df_summary[filtered_df_summary['Priority'] == selected_priority]
if selected_indice != 'All':
filtered_df_summary = filtered_df_summary[filtered_df_summary['Pty Indice'] == selected_indice]
# Apply filters from the new filter widgets for 'IDD Component' and 'Supplier'
if idd_component_filter: # Only apply if a filter value is provided
filtered_df_summary = filtered_df_summary[filtered_df_summary['IDD Component'].str.contains(idd_component_filter, case=False, na=False)]
if supplier_filter: # Only apply if a filter value is provided
filtered_df_summary = filtered_df_summary[filtered_df_summary['Supplier'].str.contains(supplier_filter, case=False, na=False)]
# Convert 'Level' and 'Rem. Qty' to integer
filtered_df_summary['Level'] = pd.to_numeric(filtered_df_summary['Level'], errors='coerce').fillna(-1).astype(int)
filtered_df_summary['Rem. Qty'] = pd.to_numeric(filtered_df_summary['Rem. Qty'], errors='coerce').fillna(0).astype(int)
# Filter out rows where 'Qty (GS/BOM)' > 'Remain. crit. Qty'
filtered_df_summary = filtered_df_summary[filtered_df_summary['Qty (GS/BOM)'] <= filtered_df_summary['Rem. Qty']]
# Sort by 'Pty Indice' and 'BOM Index'
filtered_df_summary = filtered_df_summary.sort_values(by=['Pty Indice', 'BOM Index'])
# Check if the filtered DataFrame is empty
if filtered_df_summary.empty:
supply_table_fullArchi.object = pd.DataFrame({
'Pty Indice': ['No Data'],
'IDD Component': [''],
'Level': [''],
'Description': [''],
'Qty (GS/BOM)': [''],
'Supplier': [''],
'Top Level sharing Components': [''],
'Comment': [''],
'Qty On Hand': [''],
'Rem. Qty': ['']
})
message_pane_fullArchi.object = 'No data available' # Display a message indicating no data
else:
supply_table_df = filtered_df_summary[['Pty Indice', 'IDD Component', 'Level', 'Description', 'Qty (GS/BOM)', 'Supplier', 'Top Level sharing Components', 'Comment', 'Qty On Hand', 'Rem. Qty']]
# Handle missing values in 'Level' column before styling
supply_table_df['Level'] = supply_table_df['Level'].fillna(-1)
# Apply color formatting to 'Level' column
styles = apply_color_formatting(supply_table_df)
# Update the supply_table with styled DataFrame
supply_table_fullArchi.object = supply_table_df.style.apply(lambda x: styles.loc[x.name], axis=1).hide(axis='index')
message_pane_fullArchi.object = "" # Clear the message
'''
#### New 09/16 ####
# Define callback function for the button
def on_filter_button_click(event):
update_supply_table_fullArchi(event)
# Define callback function for the Reset Filters button
def on_reset_button_click(event):
# Reset filter values
for widget in filters_fullArchi.values():
widget.value = ""
# Update table with no filters applied
update_supply_table_fullArchi(event)
# Link the buttons to their respective update functions
filters_fullArchi_button.on_click(on_filter_button_click)
reset_fullArchi_button.on_click(on_reset_button_click)
####################
# Initialize the supply_table pane
message_pane_title = pn.pane.Str("▷ List of components, full architecture (<b>Make Part & Purchased parts</b>) to reach the critical quantity:", sizing_mode='stretch_width')
supply_table_fullArchi = pn.pane.DataFrame(pd.DataFrame(), sizing_mode='stretch_width')
message_pane_fullArchi = pn.pane.Markdown("", sizing_mode='stretch_width')
def on_widget_change_supply_fullArchi(event):
update_supply_table_fullArchi(event)
# Initial call to populate the table based on default selections
update_supply_table_fullArchi(None)
###########################################
# Create a widget for supply_selected_top
###########################################
# Create a scrollable pane for displaying text
#supply_selected_top = pn.pane.Str(example_text, height_policy='max', max_height=400, sizing_mode='stretch_width')
# Create a pane for displaying dynamic text (supply_selected_top)
supply_selected_top = pn.pane.Str(sizing_mode='stretch_width')
def update_supply_selected_top(event):
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
# Initialize a boolean mask with all True values
mask = pd.Series(True, index=df_Snapshot.index)
# Apply filters based on selections
if selected_program != 'All':
mask &= (df_Snapshot['Program'] == selected_program)
if selected_priority != 'All':
mask &= (df_Snapshot['Priority'] == selected_priority)
if selected_indice != 'All':
mask &= (df_Snapshot['Pty Indice'] == selected_indice)
# Filter df_Snapshot using the constructed mask
filtered_df_snapshot = df_Snapshot[mask]
# Check if filtered_df_snapshot is empty or not
if filtered_df_snapshot.empty:
# Handle empty DataFrame scenario, e.g., display a message or return early
supply_selected_top.object = 'No data available'
else:
# Initialize an empty list to store formatted strings
lines = []
# Merge with df_Summary to get 'Qty (GS/BOM)' and 'Rem. Qty'
merged_df = filtered_df_snapshot.merge(df_Summary[['Pty Indice', 'Qty (GS/BOM)', 'Rem. Qty']], on='Pty Indice', how='left')
# Ensure 'Rem. Qty' column exists and convert it to integers - 08/14
#merged_df['Rem. Qty'] = merged_df['Rem. Qty'].astype(int)
# Handle missing and infinite values in 'Rem. Qty' - Update 09/03
merged_df['Rem. Qty'] = merged_df['Rem. Qty'].replace([float('inf'), -float('inf')], 0)
merged_df['Rem. Qty'] = merged_df['Rem. Qty'].fillna(0)
# Convert the column to integer
merged_df['Rem. Qty'] = merged_df['Rem. Qty'].astype(int)
# Calculate the minimum value from 'Qty (GS/BOM)' in the merged DataFrame
min_qty_gs = merged_df['Qty (GS/BOM)'].min()
# Drop duplicate rows based on 'Pty Indice'
merged_df = merged_df.drop_duplicates(subset=['Pty Indice'])
# Function to determine the color based on 'Top-Level Status' - Update 09/16 with Top-Level status 'Completed - No Backlog'
def get_status_color(status):
if status == 'Clear-to-Build':
return 'green'
if status == 'Completed - No Backlog':
return '#548235'
elif status == 'Short':
return 'red'
else:
return 'black'
# Iterate over each row in merged_df
for idx, row in merged_df.iterrows():
status_color = get_status_color(row['Top-Level Status'])
# Format the line for display with color coding
line = (
#f"<u>Pty Indice</u>: <b>{row['Pty Indice']}</b> - <b>{row['IDD Top Level']}</b> ({row['SEDA Top Level']})<br>"
f"<u>Pty Indice</u>: <span style='color:{important_text_color};'><b>{row['Pty Indice']}</b> - <b>{row['IDD Top Level']}</b></span> ({row['SEDA Top Level']})<br>"
f"▷ Top Level Status: <b style='color:{status_color};'>{row['Top-Level Status']}</b><br>"
f"▷ Qty of {row['Pty Indice']} Top-Level clear to build based on Purchased Part: <b>{row['Qty clear to build']}</b><br>"
f"▷ Qty of {row['Pty Indice']} Top-Level clear to be released based on Make Part: <b>{min_qty_gs}</b><br>"
f"▷ List of components (<b>Purchased only</b>) missing at IDD to reach the critical quantity (<b>{row['Rem. Qty']}</b>) of {row['Pty Indice']}: <br>"
)
# Append the formatted line to lines list
lines.append(line)
# Join all lines into a single string with double newlines between entries
display_text = '\n'.join(lines)
# Update supply_selected_top with the formatted display_text
supply_selected_top.object = display_text
# Define an initial call to populate the table when the app starts
update_supply_selected_top(None)
#############################################################################################
# Initial call to update_widgets_and_table to populate the table based on default selections
#############################################################################################
# Define supply dashboard
supply_dashboard = pn.Column(
pn.pane.HTML(f"""
<div style="text-align: left;">
<style>
h2 {{ margin-bottom: 0; color: #305496; }} /* Set title color here */
p {{ margin-top: 0; }}
</style>
<h2>Supply Chain</h2>
<p>{f"|Summary| - <b>{file_date}</b>: IDD's inventory status based on QAD (ERP) | [Daily update]"}</p>
</div>
"""),
supply_selected_top,
supply_table,
pn.Spacer(height=20),
pn.Column(message_pane_title, filter_widgets_fullArchi, supply_table_fullArchi), # Encapsulate title and table
sizing_mode='stretch_width', # Adjust sizing mode
#height=600 # Set a fixed height to enforce the maximum height
)
##############################################################################################################################
# --->>>> PRODUCTION <<<---
##############################################################################################################################
# Apply mapping to create 'Program' column in df_WIP
df_WIP['Program'] = df_WIP['Pty Indice'].map(indice_to_program)
# Convert Priority in df_WIP to numeric, coercing errors to NaN
df_WIP['Priority'] = pd.to_numeric(df_WIP['Priority'], errors='coerce')
###########################################
# Create a widget for wip_selected_top
###########################################
# Create a pane for displaying dynamic text (supply_selected_top)
#wip_selected_top = pn.pane.Markdown(sizing_mode='stretch_width') -- text style is different
# Create a pane for displaying dynamic text (supply_selected_top)
wip_selected_top = pn.pane.Str(sizing_mode='stretch_width')
# Define constants for sizing
default_max_height = 200 # Max height for the text pane
max_table_height = 300 # Max height for the table
total_height = 600 # Total height for the layout
row_height = 20 # Adjust row height as needed
# Create a pane for displaying dynamic text with scrolling
#wip_selected_top = pn.pane.Markdown('', sizing_mode='stretch_width')
###########################################
# Create a function for wip_selected_top
###########################################
# Function to split long text into multiple lines
def split_long_text(text, max_length):
words = text.split(', ')
lines = []
current_line = ''
for word in words:
if len(current_line) + len(word) + 2 > max_length: # +2 for ", "
lines.append(current_line)
current_line = word
else:
if current_line:
current_line += ', '
current_line += word
lines.append(current_line)
return '\n'.join(lines)
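# Example (hypothetical WO numbers): split_long_text('WO1001, WO1002, WO1003', max_length=14)
# returns 'WO1001, WO1002\nWO1003' -- the third WO spills onto a new line once the running
# length (including the ', ' separator) would exceed max_length.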
def update_wip_selected_top(event):
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
# Convert 'Priority' and 'Level' to numeric, coercing errors to NaN
df_WIP['Priority'] = pd.to_numeric(df_WIP['Priority'], errors='coerce')
df_WIP['Level'] = pd.to_numeric(df_WIP['Level'], errors='coerce')
# Apply filters
mask = pd.Series(True, index=df_WIP.index)
if selected_program != 'All':
mask &= (df_WIP['Program'] == selected_program)
if selected_priority != 'All':
mask &= (df_WIP['Priority'] == selected_priority)
if selected_indice != 'All':
mask &= (df_WIP['Pty Indice'] == selected_indice)
# Create a copy of the filtered DataFrame to avoid SettingWithCopyWarning
filtered_df_wip = df_WIP[mask].copy()
# Check if any data is available
if filtered_df_wip.empty:
wip_selected_top.object = 'No data available for the selected filters.'
else:
# Forward fill missing 'WO' values
filtered_df_wip['WO'] = filtered_df_wip['WO'].ffill()
# Forward fill 'WO' again within index-based groups
change_indices = filtered_df_wip.index.to_series().diff().ne(0).cumsum()
filtered_df_wip['WO'] = filtered_df_wip.groupby(change_indices)['WO'].transform(lambda x: x.ffill())
lines = []
filtered_df_wip = filtered_df_wip.drop_duplicates(subset=['Pty Indice', 'WO'])
# Handle cases where 'qty_top_level' or 'qty_sub_level' might be NaN
qty_top_level = filtered_df_wip[filtered_df_wip['Level'] == 0].groupby('Pty Indice')['WO Qty'].sum()
qty_sub_level = filtered_df_wip[filtered_df_wip['Level'] > 0].groupby('Pty Indice')['WO Qty'].sum()
summary_df = pd.DataFrame({
'qty_top_level': qty_top_level,
'qty_sub_level': qty_sub_level
}).reset_index()
for _, row in summary_df.iterrows():
pty_indice = row['Pty Indice']
# Ensure the values are not NaN before converting to int
qty_top_level_value = int(row['qty_top_level']) if pd.notna(row['qty_top_level']) and str(row['qty_top_level']).isdigit() else 0
qty_sub_level_value = int(row['qty_sub_level']) if pd.notna(row['qty_sub_level']) and str(row['qty_sub_level']).isdigit() else 0
# Get details for the current 'Pty Indice'
details = filtered_df_wip[filtered_df_wip['Pty Indice'] == pty_indice].iloc[0]
# Filter work orders for top and sub levels
top_level_df = filtered_df_wip[filtered_df_wip['Level'] == 0]
sub_level_df = filtered_df_wip[filtered_df_wip['Level'] > 0]
# Get unique work orders for top level and sub level, and filter out NaNs
unique_wo_top = top_level_df[top_level_df['Pty Indice'] == pty_indice]['WO'].astype(str).unique()
unique_wo_sub = sub_level_df[sub_level_df['Pty Indice'] == pty_indice]['WO'].astype(str).unique()
# Convert arrays to lists
list_wo_top = ', '.join(unique_wo_top) if unique_wo_top.size > 0 else 'None'
list_wo_sub = ', '.join(unique_wo_sub) if unique_wo_sub.size > 0 else 'None'
# Split long text into multiple lines
list_wo_top = split_long_text(list_wo_top, max_length=120) # Adjust max_length as needed
list_wo_sub = split_long_text(list_wo_sub, max_length=120) # Adjust max_length as needed
# Calculate the number of unique work orders and components
unique_wo_qty_top = len(unique_wo_top)
unique_wo_qty_sub = len(unique_wo_sub)
unique_sub_PN = sub_level_df[sub_level_df['Pty Indice'] == pty_indice]['IDD Component'].nunique()
# Construct the line for output based on availability
top_level_info = (
f"▷ Quantity <b><span style='color:{important_text_color};'>Top-Level</span></b> {pty_indice} on the floor: <b>{qty_top_level_value}</b> Top-Level within <b>{unique_wo_qty_top}</b> WO:<br> {list_wo_top}"
if qty_top_level_value > 0 else f"▷ Quantity {pty_indice} Top Level on the floor: No Top-Level on the floor"
)
sub_level_info = (
f"▷ Quantity of {pty_indice}'s related <b><span style='color:{important_text_color};'>Sub-Level</span></b> on the floor</b>: Total of <b>{qty_sub_level_value}</b> Sub-Level, including <b>{unique_sub_PN}</b> unique components within <b>{unique_wo_qty_sub}</b> WO:\n{list_wo_sub}"
if qty_sub_level_value > 0 else f"▷ Quantity of related {pty_indice} sub-Level on the floor: No Sub-Level on the floor"
)
line = (
#f"<u>Pty Indice</u>: <b>{pty_indice}</b> - <b>{details['IDD Top Level']}</b> ({details['SEDA Top Level']})<br>"
f"<u>Pty Indice</u>: <span style='color:{important_text_color};'><b>{pty_indice}</b> - <b>{details['IDD Top Level']}</b></span> ({details['SEDA Top Level']})<br>"
f"{top_level_info}<br>"
f"{sub_level_info}<br>"
)
lines.append(line)
display_text = '\n'.join(lines)
wip_selected_top.object = display_text
# Define an initial call to populate the table when the app starts
update_wip_selected_top(None)
#########################################################
# New 09/16
# Define filtering widgets for filters_Prod
#########################################################
label_wo = pn.pane.HTML('<b style="color:#2B70B3;">WO Filter</b>')
label_idd_component = pn.pane.HTML('<b style="color:#2B70B3;">IDD Component Filter</b>')
filters_Prod = {
'WO': pn.widgets.TextInput(name=''),
'IDD Component': pn.widgets.TextInput(name=''),
}
# Create buttons for applying and resetting filters
filters_Prod_button = pn.widgets.Button(name='Apply Filters', button_type='primary')
reset_Prod_button = pn.widgets.Button(name='Reset Filters', button_type='default')
# Set default value to None for all filter widgets
for widget in filters_Prod.values():
widget.value = ''
# Create the layout with labels, filter widgets, and buttons
filter_widgets_Prod = pn.Row(
pn.Column(label_wo, filters_Prod['WO']),
pn.Column(label_idd_component, filters_Prod['IDD Component']),
pn.Column(
pn.Spacer(height=25), # Spacer before the buttons
pn.Row(filters_Prod_button, reset_Prod_button)
)
)
############################################
# Create a function for wip_table
############################################
# Update 09/16
def update_wip_table():
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
wo_filter = filters_Prod['WO'].value
idd_component_filter = filters_Prod['IDD Component'].value
# Apply filters to df_WIP
mask = pd.Series(True, index=df_WIP.index)
if selected_program != 'All':
mask &= (df_WIP['Program'] == selected_program)
if selected_priority != 'All':
mask &= (df_WIP['Priority'] == selected_priority)
if selected_indice != 'All':
mask &= (df_WIP['Pty Indice'] == selected_indice)
if wo_filter:
mask &= (df_WIP['WO'].str.contains(wo_filter, case=False, na=False))
if idd_component_filter:
mask &= (df_WIP['IDD Component'].str.contains(idd_component_filter, case=False, na=False))
filtered_df_wip = df_WIP.loc[mask].copy()
if filtered_df_wip.empty:
# Display a placeholder message if no data is available
wip_table.object = pd.DataFrame({
'Pty Indice': ['No Data'],
'WO': [''],
'WO Qty': [''],
'Last movement': [''],
'Area': [''],
'IDD Component': [''],
'Level': [''],
'Description Component': [''],
'Release': [''],
'BOM Index': ['']
})
message_pane.object = 'No data available'
else:
# Date processing
filtered_df_wip['Last movement'] = pd.to_datetime(filtered_df_wip['Last movement'], errors='coerce')
filtered_df_wip['Release'] = pd.to_datetime(filtered_df_wip['Release'], errors='coerce')
filtered_df_wip['Last movement'] = filtered_df_wip['Last movement'].fillna(pd.NaT)
filtered_df_wip['Release'] = filtered_df_wip['Release'].fillna(pd.NaT)
# Group by 'WO' and select most recent 'Last movement'
def select_most_recent(group):
return group.loc[group['Last movement'].idxmax()]
filtered_df_wip = filtered_df_wip.groupby('WO').apply(select_most_recent).reset_index(drop=True)
filtered_df_wip = filtered_df_wip.sort_values(by='Release')
# Format dates
filtered_df_wip['Last movement'] = filtered_df_wip['Last movement'].dt.strftime('%m-%d-%Y')
filtered_df_wip['Release'] = filtered_df_wip['Release'].dt.strftime('%m-%d-%Y')
# Sort and clean DataFrame
filtered_df_wip = filtered_df_wip[['Pty Indice', 'WO', 'WO Qty', 'Last movement', 'Area', 'IDD Component', 'Level', 'Description Component', 'Release', 'BOM Index']]
filtered_df_wip['Level'] = filtered_df_wip['Level'].fillna(-1)
filtered_df_wip = filtered_df_wip.sort_values(by=['Pty Indice', 'BOM Index'])
filtered_df_wip = filtered_df_wip.drop(columns=['BOM Index'])
# Apply color formatting
styles = apply_color_formatting(filtered_df_wip) # Ensure this function returns a valid DataFrame
styled_df = filtered_df_wip.style.apply(lambda x: styles.loc[x.name], axis=1).hide(axis='index')
wip_table.object = styled_df
message_pane.object = ""
# Initialize the wip_table pane with an empty DataFrame
wip_table = pn.pane.DataFrame(
pd.DataFrame(columns=df_WIP.columns),
sizing_mode='stretch_width',
height=500
)
message_pane = pn.pane.Markdown("", sizing_mode='stretch_width')
# Initial call to populate the table based on default selections
update_wip_table()
##########################################################
# New 09/16
# Define callback function for the Apply Filters button
###########################################################
# Define callback functions for the buttons
def on_filter_button_click_Prod(event):
update_wip_table()
def on_reset_button_click_prod(event):
for widget in filters_Prod.values():
widget.value = ""
update_wip_table()
filters_Prod_button.on_click(on_filter_button_click_Prod)
reset_Prod_button.on_click(on_reset_button_click_prod)
# Set up callbacks for all widgets
def widget_change_prod(event):
update_wip_table()
##########################################################
#******#########################*******########################*********************************
#########################################################################################################################
# Create Graph 13bis - Combination (side by side) of Graphs 13 and 13b from the tab |General Overview|
#******#########################*******########################*******************************##########################
def create_placeholder_plot(message):
# Create an empty figure
p = figure(height=250, width=400, title=message)
p.xaxis.visible = False
p.yaxis.visible = False
p.grid.visible = False
p.add_layout(Title(text=message, align='center', text_font_size='10pt', text_color="#002570"), 'above')
return p
########################################################################################################
#Copying pivot table for graphs 13bis and 13bbis and sort by Pty Indice to get PXA before PXB etc.
pivot_table_combined_2 = pivot_table_combined.copy()
#print('pivot_table_combined_2')
#display(pivot_table_combined_2)
# Sort pivot_table_combined_2 by 'Pty Indice'
pivot_table_combined_2 = pivot_table_combined_2.sort_values(by='Pty Indice')
#print('pivot_table_combined_2')
#display(pivot_table_combined_2)
# Mapping on pivot_table_combined to get program
pivot_table_combined_2['Program'] = pivot_table_combined_2['Pty Indice'].map(indice_to_program)
###################################################
# Palette for color of bars and y labels - Order does not matter
###################################################
custom_palette13bis = {"Standard Order":"#A08EBC",
"DPAS Order": "#E4DFEC",
"Qty clear to build": "#7FDB91",
"Qty WIP": "#DAEEF3"}
custom_palette13bbis = {"Total Critical Qty": "#FFA07A",
"Qty Shipped": "#5AB2CA",
"Remain. crit. Qty": "#778899",
"IDD Backlog Qty": "#cdbedd"}
#//////////////////////////////////////////////////
###################################################
# create_plot_13bis
###################################################
def create_plot_13bis():
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
mask = pd.Series(True, index=pivot_table_combined_2.index)
if selected_program != 'All':
mask &= (pivot_table_combined_2['Program'] == selected_program)
if selected_priority != 'All':
mask &= (pivot_table_combined_2['Priority'] == selected_priority)
if selected_indice != 'All':
mask &= (pivot_table_combined_2['Pty Indice'] == selected_indice)
filtered_df = pivot_table_combined_2.loc[mask].copy()
#####################################################################
# Dynamic y bounds for create_plot_13bis
#####################################################################
if filtered_df.empty:
return create_placeholder_plot('No data available for the selected filters.')
max_quantity_value13bis = filtered_df[['Qty WIP', 'DPAS Order', 'Standard Order' , 'Qty clear to build']].max().max()
min_quantity_value13bis = filtered_df[['Qty WIP', 'DPAS Order', 'Standard Order', 'Qty clear to build']].min().min()
max_y_bound13bis = max_quantity_value13bis * 2
min_y_bound13bis = min_quantity_value13bis * 2 if min_quantity_value13bis < 0 else 0
#Keep order of pivot_table_combined: 'Standard Order', 'DPAS Order', 'Qty WIP', 'Qty clear to build'
# Melt the dataframe --> Reverse order of the bars on the graph
melted_df = filtered_df.melt(id_vars=['Pty Indice'],
value_vars=['Qty WIP', 'Qty clear to build', 'Standard Order', 'DPAS Order'],
var_name='Quantity Type', value_name='Quantity Value')
#############################################
# Set Order of melted_df --> Order of y label
#############################################
# Define the order of categories for 'Quantity Type'
unique_quantity_types = ['DPAS Order', 'Standard Order', 'Qty clear to build', 'Qty WIP']
# Convert 'Quantity Type' to a categorical type with the defined order
melted_df['Quantity Type'] = pd.Categorical(melted_df['Quantity Type'], categories=unique_quantity_types, ordered=True)
##################################################
# Define unique indices and calculate x_combined
##################################################
# New code 08/07
# Define constants
unique_indices = melted_df['Pty Indice'].astype('category').cat.categories
unique_quantity_types = melted_df['Quantity Type'].astype('category').cat.categories
num_types = len(unique_quantity_types) # Number of bars per 'Pty Indice'
num_indice = len(unique_indices) # Number of selected 'Pty Indice'
# Generate base_positions based on enumerate(unique_indices)
base_positions = {indice: i * (num_indice + 1) for i, indice in enumerate(unique_indices)}
# Define gaps -- The gap is supposed to change based on the number of num_indice
def get_gap(num_indice):
# Define a mapping of num_indice to gap values
gap_map = {
3: 0.75,
4: 0.5,
5: 0.33, # Example value, adjust as needed
6: 0.25, # Example value, adjust as needed
8: 0.175, # Example value, adjust as needed
9: 0.125 # Example value, adjust as needed
}
# Return the gap based on the number of indices
return gap_map.get(num_indice, 0.5) # Default to 0.5 if num_indice is not found
# Get Gap
gap = get_gap(num_indice)
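# Example: get_gap(4) -> 0.5 and get_gap(6) -> 0.25 per gap_map; any count not listed
# (e.g. get_gap(7)) falls back to the default of 0.5.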
# Create a mapping of 'Pty Indice' to its index, starting from 0
indice_mapping = {indice: i for i, indice in enumerate(unique_indices)}
# Calculate x_combined for the bar positions
def calculate_x_combined(row):
pty_indice = row['Pty Indice']
quantity_type_code = melted_df['Quantity Type'].cat.codes[row.name]
# Get the index of the current 'Pty Indice'
indice = indice_mapping[pty_indice]
# Calculate x_combined
x_combined = (base_positions[pty_indice]
+ quantity_type_code
+ 1 / (num_indice + 1) # Small offset to separate bars
+ gap * indice) # Adjust for the gap
# Optionally, print debug information
# print(f"Pty Indice: {pty_indice}, Quantity Type Code: {quantity_type_code}, base position: {base_positions[pty_indice]}, x_combined: {x_combined}")
return x_combined
# Apply the function to calculate x_combined
melted_df['x_combined'] = melted_df.apply(calculate_x_combined, axis=1)
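# Worked example (hypothetical 'PXA'/'PXB' indices, 4 quantity types): num_indice = 2, so
# base_positions = {'PXA': 0, 'PXB': 3}, gap = get_gap(2) = 0.5 (default), and for instance
#   'PXA' / quantity-type code 0 -> 0 + 0 + 1/3 + 0.5*0 ≈ 0.33
#   'PXB' / quantity-type code 0 -> 3 + 0 + 1/3 + 0.5*1 ≈ 3.83
# i.e. each group of bars is shifted right by its base position plus an index-dependent gap.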
# To inspect the result
#print(melted_df[['Pty Indice', 'Quantity Type', 'x_combined']])
###################
# Create the plot
##################
plot = melted_df.hvplot.bar(
x='Pty Indice',
y='Quantity Value',
by='Quantity Type',
color='Quantity Type',
cmap=custom_palette13bis,
#title='IDD Type of order (DPAS/Standard), Qty Clear-to-Build & Qty WIP per Pty Indice',
xlabel='Pty Indice',
ylabel='Top-Level [Quantity]',
legend='top_right',
stacked=False,
bar_width=0.6, # Set bar width - 09/12
tools=[],
).opts(
xrotation=90,
)
updated_bokeh_plot = hv.render(plot, backend='bokeh')
updated_bokeh_plot.tools = [tool for tool in updated_bokeh_plot.tools if not isinstance(tool, HoverTool)]
hover = HoverTool()
hover.tooltips = [
("Pty Indice", "@Pty_Indice"),
("KPI", "@color"),
("Value", "@Quantity_Value"),
]
updated_bokeh_plot.add_tools(hover)
# 09/12 - Set wheel zoom inactive
updated_bokeh_plot.toolbar.active_scroll = None
# Customizations
updated_bokeh_plot.xaxis.major_label_text_font_size = '0pt'
updated_bokeh_plot.yaxis.major_label_text_font_size = '10pt'
#updated_bokeh_plot.title.text_font_size = '8pt'
#updated_bokeh_plot.title.text_color = "#002570"
updated_bokeh_plot.xaxis.axis_line_width = 2
updated_bokeh_plot.yaxis.axis_line_width = 2
updated_bokeh_plot.xaxis.major_label_orientation = 'vertical'
updated_bokeh_plot.yaxis.major_label_orientation = 'horizontal'
updated_bokeh_plot.yaxis.axis_label_text_font_size = '10pt'
updated_bokeh_plot.xgrid.grid_line_color = None
updated_bokeh_plot.ygrid.grid_line_color = '#F2F2F2'
updated_bokeh_plot.ygrid.grid_line_dash = [6, 4]
updated_bokeh_plot.y_range = Range1d(start=min_y_bound13bis, end=max_y_bound13bis)
updated_bokeh_plot.toolbar.logo = None
updated_bokeh_plot.legend.label_text_font_size = '8pt'
# Add custom formatted title
updated_bokeh_plot.add_layout(Title(
text="Backlog KPI#1",
align='center',
text_font_size='10pt', # Adjust font size for the title
text_color="#002570" # Adjust color for the title
), 'above')
# Add labels on top of the bars
source = ColumnDataSource(melted_df)
labels = LabelSet(
x= 'x_combined',
#x= 'Quantity Type',
y='Quantity Value',
text='Quantity Value',
level='glyph',
source=source,
text_font_size='8pt',
text_font_style='bold', # Set the font style to bold
text_align='center',
text_baseline='bottom', # Place labels above the bars
y_offset=5, # Fixed vertical offset so labels sit just above the bars
text_color={'field': 'Quantity Type', 'transform': CategoricalColorMapper(
factors=unique_quantity_types, palette=[custom_palette13bis[qtype] for qtype in unique_quantity_types]
)}
)
updated_bokeh_plot.add_layout(labels)
# Debugging: Output the calculated x_combined and other columns
#print("Values in melted_df with x_combined:")
#print(melted_df[['Pty Indice', 'Quantity Type', 'Quantity Value', 'x_combined']])
return updated_bokeh_plot
#//////////////////////////////////////////////////
###################################################
# create_plot_13bbis
###################################################
def create_plot_13bbis():
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
mask = pd.Series(True, index=pivot_table_combined_2.index)
if selected_program != 'All':
mask &= (pivot_table_combined_2['Program'] == selected_program)
if selected_priority != 'All':
mask &= (pivot_table_combined_2['Priority'] == selected_priority)
if selected_indice != 'All':
mask &= (pivot_table_combined_2['Pty Indice'] == selected_indice)
filtered_df = pivot_table_combined_2.loc[mask].copy()
#####################################################################
# Dynamic y bounds for create_plot_13bbis
#####################################################################
if filtered_df.empty:
return create_placeholder_plot('No data available for the selected filters.')
max_quantity_value13bbis = filtered_df[['Total Critical Qty', 'Qty Shipped', 'Remain. crit. Qty', 'IDD Backlog Qty']].max().max()
min_quantity_value13bbis = filtered_df[['Total Critical Qty', 'Qty Shipped', 'Remain. crit. Qty', 'IDD Backlog Qty']].min().min()
max_y_bound13bbis = max_quantity_value13bbis * 2
min_y_bound13bbis = min_quantity_value13bbis * 2 if min_quantity_value13bbis < 0 else 0
# keep order of pivot_table_combined: 'Total Critical Qty', 'Qty Shipped', 'Remain. crit. Qty', 'IDD Backlog Qty'
melted_df = filtered_df.melt(id_vars=['Pty Indice'],
value_vars=['Total Critical Qty', 'Qty Shipped', 'Remain. crit. Qty', 'IDD Backlog Qty'],
var_name='Quantity Type', value_name='Quantity Value')
#############################################
# Set Order of melted_df --> Order of y label
#############################################
# Define the order of categories for 'Quantity Type'
unique_quantity_types = ['IDD Backlog Qty', 'Remain. crit. Qty', 'Qty Shipped', 'Total Critical Qty']
# Convert 'Quantity Type' to a categorical type with the defined order
melted_df['Quantity Type'] = pd.Categorical(melted_df['Quantity Type'], categories=unique_quantity_types, ordered=True)
####################################################
# Define unique indices and calculate x_combined
##################################################
# New code 08/07
# Define constants
unique_indices = melted_df['Pty Indice'].astype('category').cat.categories
unique_quantity_types = melted_df['Quantity Type'].astype('category').cat.categories
num_types = len(unique_quantity_types) # Number of bars per 'Pty Indice'
num_indice = len(unique_indices) # Number of selected 'Pty Indice'
# Generate base_positions based on enumerate(unique_indices)
base_positions = {indice: i * (num_indice + 1) for i, indice in enumerate(unique_indices)}
# Define gaps -- the gap is supposed to change based on num_indice (for num_indice = 4, gap = 0.5 works well)
#gap = 0.5
gap = 1/(num_indice/2)
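# Example: gap = 1/(num_indice/2) = 2/num_indice, so 4 selected indices give gap = 0.5
# (matching the value noted above) and 8 give gap = 0.25.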
# Create a mapping of 'Pty Indice' to its index, starting from 0
indice_mapping = {indice: i for i, indice in enumerate(unique_indices)}
# Calculate x_combined for the bar positions
def calculate_x_combined(row):
pty_indice = row['Pty Indice']
quantity_type_code = melted_df['Quantity Type'].cat.codes[row.name]
# Get the index of the current 'Pty Indice'
indice = indice_mapping[pty_indice]
x_combined = base_positions[pty_indice] + quantity_type_code + 1/(num_indice + 1) + gap*indice #Calculation of the gap is yet to be refined as it does not work for all cases when Pty indice > 4
return x_combined
# Apply the function to calculate x_combined
melted_df['x_combined'] = melted_df.apply(calculate_x_combined, axis=1)
#####################################################
plot = melted_df.hvplot.bar(
x='Pty Indice',
y='Quantity Value',
by='Quantity Type',
color='Quantity Type',
cmap=custom_palette13bbis,
#title='<div style="text-align: center;">IDD Total Backlog, Total Critical Quantity,<br> Qty Shipped & Remaining Critical Qty per Pty Indice</div>',
xlabel='Pty Indice',
ylabel='Top-Level [Quantity]',
legend='top_right',
stacked=False,
bar_width=0.6, # Set bar width - 09/12
tools=[],
).opts(
xrotation=90,
)
updated_bokeh_plot = hv.render(plot, backend='bokeh')
updated_bokeh_plot.tools = [tool for tool in updated_bokeh_plot.tools if not isinstance(tool, HoverTool)]
hover = HoverTool()
hover.tooltips = [
("Pty Indice", "@Pty_Indice"),
("KPI", "@color"),
("Value", "@Quantity_Value"),
]
updated_bokeh_plot.add_tools(hover)
# 09/12 - Set wheel zoom inactive
updated_bokeh_plot.toolbar.active_scroll = None
updated_bokeh_plot.xaxis.major_label_text_font_size = '0pt'
updated_bokeh_plot.yaxis.major_label_text_font_size = '10pt'
#updated_bokeh_plot.title.text_font_size = '8pt'
#updated_bokeh_plot.title.text_color = "#002570"
updated_bokeh_plot.xaxis.axis_line_width = 2
updated_bokeh_plot.yaxis.axis_line_width = 2
updated_bokeh_plot.xaxis.major_label_orientation = 'vertical'
updated_bokeh_plot.yaxis.major_label_orientation = 'horizontal'
updated_bokeh_plot.yaxis.axis_label_text_font_size = '10pt'
updated_bokeh_plot.xgrid.grid_line_color = None
updated_bokeh_plot.ygrid.grid_line_color = '#F2F2F2'
updated_bokeh_plot.ygrid.grid_line_dash = [6, 4]
updated_bokeh_plot.y_range = Range1d(start=min_y_bound13bbis, end=max_y_bound13bbis)
updated_bokeh_plot.toolbar.logo = None
updated_bokeh_plot.legend.label_text_font_size = '8pt' # Set the font size of legend text
# Add custom formatted title
updated_bokeh_plot.add_layout(Title(
text="Backlog KPI#2",
align='center',
text_font_size='10pt', # Adjust font size for the title
text_color="#002570" # Adjust color for the title
), 'above')
# Add labels on top of the bars
source = ColumnDataSource(melted_df)
labels = LabelSet(
x= 'x_combined',
#x= 'Quantity Type',
y='Quantity Value',
text='Quantity Value',
level='glyph',
source=source,
text_font_size='8pt',
text_font_style='bold', # Set the font style to bold
text_align='center',
text_baseline='bottom', # Place labels above the bars
y_offset=5,
text_color={'field': 'Quantity Type', 'transform': CategoricalColorMapper(
factors=unique_quantity_types, palette=[custom_palette13bbis[qtype] for qtype in unique_quantity_types]
)}
)
updated_bokeh_plot.add_layout(labels)
return updated_bokeh_plot
#############
# Initial call
#############
plot_pane_13bis = pn.pane.Bokeh(create_plot_13bis())
plot_pane_13bbis = pn.pane.Bokeh(create_plot_13bbis())
################################################################
# Update methods to include messages when no data is available
###############################################################
def update_plot_13bis(event):
plot_pane_13bis.object = create_plot_13bis()
def update_plot_13bbis(event):
plot_pane_13bbis.object = create_plot_13bbis()
#########################################################################################################################
# Create Graph 14-14b - Combination (side by side) of Graphs 14 and 14b from the tab |General Overview|
#******#########################*******########################*******************************##########################
#Copying pivot table for graphs 14bis and 14bbis
pivot_table_14_2 = pivot_table_14.copy()
# Sort pivot_table_14_2 by 'Priority' and 'Pty Indice' - Update 08/28
#pivot_table_14_2 = pivot_table_14_2.sort_values(by='Pty Indice')
pivot_table_14_2.sort_values(by=['Priority', 'Pty Indice'], inplace=True)
# Mapping on pivot_table_combined to get program
pivot_table_14_2['Program'] = pivot_table_14_2['Pty Indice'].map(indice_to_program)
custom_palette14bis = {
"IDD Expected Total Sales": "rgba(68, 114, 196, 0.8)", # #4472C4 with alpha 0.8
"IDD Expected Total Margin": "rgba(63, 201, 89, 0.5)", # #3FC959 with alpha 0.5
"IDD Current Sales (Total)": "#4472C3",
"IDD Current Margin (Total)": "#548235",
}
custom_palette14bis_2 = {
"IDD Expected Total Sales": "rgba(68, 114, 196, 0.8)", # #4472C4 with alpha 0.8
"IDD Expected Total Margin": "rgba(63, 201, 89, 0.5)", # #3FC959 with alpha 0.5
"IDD Realized Sales": "#4472C3",
"IDD Realized Margin": "#548235",
}
custom_palette14bbis = {
"IDD Current Margin (%)": "#E2EFDA",
"% Completion": "#7FDB91",
"% DPAS Order": "#E4DFEC",
"IDD Expected ROI (Total)": "#568838",
}
#//////////////////////////////////////////////////
##################################################
# Create graph 14bis and 14bbis ---> Financial KPI
# --> To be updated 09/23 to use df_Historic instead of df_Snapshot to calculate 'Realized Sales' and 'Realized Margin'
# The calculation should be based on the real data from the df_Historic turnover report, including the change of price over time
# New columns introduced in df_Snapshot:
# df_snapshot['IDD AVG realized sales price [USD]']
# df_snapshot['IDD AVG realized Margin Standard [USD]']
# df_snapshot['IDD AVG realized Margin [%]']
####################################################
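# --- Hedged sketch (not wired into the dashboard): one possible way to derive 'Realized Sales' and
# 'Realized Margin' per 'Pty Indice' from df_Historic, as described in the note above. The column
# names 'Qty Shipped', 'Unit Sale Price [USD]' and 'Unit Cost [USD]' are illustrative assumptions;
# the actual turnover-report columns in df_Historic may differ.
def compute_realized_from_historic(df_historic):
    sales = df_historic['Qty Shipped'] * df_historic['Unit Sale Price [USD]']
    margin = sales - df_historic['Qty Shipped'] * df_historic['Unit Cost [USD]']
    realized = pd.DataFrame({
        'Pty Indice': df_historic['Pty Indice'],
        'IDD Realized Sales': sales,
        'IDD Realized Margin': margin,
    })
    # Aggregate per 'Pty Indice' so the result could be merged back into pivot_table_14_2
    return realized.groupby('Pty Indice', as_index=False).sum()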
##### New 09/24 - Replace Graph 14bis with the newly added columns in df_Snapshot
def create_plot_14bis_2():
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
mask = pd.Series(True, index=pivot_table_14_2.index)
if selected_program != 'All':
mask &= (pivot_table_14_2['Program'] == selected_program)
if selected_priority != 'All':
mask &= (pivot_table_14_2['Priority'] == selected_priority)
if selected_indice != 'All':
mask &= (pivot_table_14_2['Pty Indice'] == selected_indice)
filtered_df = pivot_table_14_2.loc[mask].copy()
#####################################################################
# Dynamic y bounds for create_plot_14bis_2
#####################################################################
if filtered_df.empty:
return create_placeholder_plot('No data available for the selected filters.')
max_quantity_value14bis_2 = filtered_df[['IDD Expected Total Sales', 'IDD Expected Total Margin', 'IDD Realized Sales', 'IDD Realized Margin']].max().max()
min_quantity_value14bis_2 = filtered_df[['IDD Expected Total Sales', 'IDD Expected Total Margin', 'IDD Realized Sales', 'IDD Realized Margin']].min().min()
max_y_bound14bis_2 = max_quantity_value14bis_2 * 2
min_y_bound14bis_2 = min_quantity_value14bis_2 * 2 if min_quantity_value14bis_2 < 0 else 0
melted_df = filtered_df.melt(id_vars=['Pty Indice'],
value_vars=['IDD Expected Total Sales', 'IDD Expected Total Margin', 'IDD Realized Sales', 'IDD Realized Margin'],
var_name='Quantity Type', value_name='Quantity Value')
# Add a column with formatted values in thousands with '$Xk' prefix and rounded to whole numbers
#melted_df['Formatted Value'] = melted_df['Quantity Value'].apply(lambda x: f"${x / 1000:,.0f}k")
#############################################
# Set Order of melted_df --> Order of y label
#############################################
# Define the order of categories for 'Quantity Type'
unique_quantity_types = ['IDD Realized Margin', 'IDD Realized Sales', 'IDD Expected Total Margin', 'IDD Expected Total Sales']
# Convert 'Quantity Type' to a categorical type with the defined order
melted_df['Quantity Type'] = pd.Categorical(melted_df['Quantity Type'], categories=unique_quantity_types, ordered=True)
####################################################
# Define unique indices and calculate x_combined
##################################################
# New code 08/07
# Define constants
unique_indices = melted_df['Pty Indice'].astype('category').cat.categories
unique_quantity_types = melted_df['Quantity Type'].astype('category').cat.categories
num_types = len(unique_quantity_types) # Number of bars per 'Pty Indice'
num_indice = len(unique_indices) # Number of selected 'Pty Indice'
# Generate base_positions based on enumerate(unique_indices)
base_positions = {indice: i * (num_indice + 1) for i, indice in enumerate(unique_indices)}
# Define gaps -- The gap is supposed to change with num_indice (for num_indice = 4, gap = 0.5 works well); a generalized dodge-offset sketch follows this function
#gap = 0.5
gap = 1/(num_indice/2)
# Create a mapping of 'Pty Indice' to its index, starting from 0
indice_mapping = {indice: i for i, indice in enumerate(unique_indices)}
# Calculate x_combined for the bar positions
def calculate_x_combined(row):
pty_indice = row['Pty Indice']
quantity_type_code = melted_df['Quantity Type'].cat.codes[row.name]
# Get the index of the current 'Pty Indice'
indice = indice_mapping[pty_indice]
x_combined = base_positions[pty_indice] + quantity_type_code + 1/(num_indice + 1) + gap*indice #Calculation of the gap is yet to be refined as it does not work for all cases when Pty indice > 4
return x_combined
# Apply the function to calculate x_combined
melted_df['x_combined'] = melted_df.apply(calculate_x_combined, axis=1)
#####################################################
#New 09/11 - Calculate Y position
# Calculate the y_offset dynamically based on the 'Quantity Value'
#melted_df['y_position'] = melted_df['Quantity Value'] + melted_df['Quantity Value']*0.1
# Compute the maximum value of Quantity Value
max_quantity_value = melted_df['Quantity Value'].max()
# Calculate the 5% offset of the maximum value
offset = max_quantity_value * 0.05
# Define the function to calculate y_position with the conditional offset
def calculate_y_position(quantity_value):
if quantity_value >= 0:
return quantity_value + offset
else:
return offset # For negative bars, keep the label just above the zero line
# Apply the function to the DataFrame
melted_df['y_position'] = melted_df['Quantity Value'].apply(calculate_y_position)
#print('melted_df')
#display(melted_df)
plot = melted_df.hvplot.bar(
x='Pty Indice',
y='Quantity Value',
by='Quantity Type',
color='Quantity Type',
cmap=custom_palette14bis_2,
#title='IDD Total Sales & IDD Marge per Pty Indice by Top-Level Status, Production Status & Product Category',
xlabel='Pty Indice',
ylabel='[K$]',
legend='top_right',
stacked=False,
bar_width=0.6, # Set bar width - 09/12
#padding=1,
tools=[],
).opts(
xrotation=90,
)
updated_bokeh_plot = hv.render(plot, backend='bokeh')
updated_bokeh_plot.tools = [tool for tool in updated_bokeh_plot.tools if not isinstance(tool, HoverTool)]
hover = HoverTool()
hover.tooltips = [
("Pty Indice", "@Pty_Indice"),
("KPI", "@color"),
("Quantity Value", "@Quantity_Value{($0,0k)}") # Format values: thousands with 'K' # Quantity_Value with the '_' otherwise that does not work!
]
updated_bokeh_plot.add_tools(hover)
# Remove wheel zoom from active tools if you want it inactive by default - 08/12
#updated_bokeh_plot.tools = [tool for tool in updated_bokeh_plot.tools if not isinstance(tool, WheelZoomTool)]
# 09/12 - Set wheel zoom inactive
updated_bokeh_plot.toolbar.active_scroll = None
updated_bokeh_plot.xaxis.major_label_text_font_size = '0pt'
updated_bokeh_plot.yaxis.major_label_text_font_size = '10pt'
#updated_bokeh_plot.title.text_font_size = '8pt'
#updated_bokeh_plot.title.text_color = "#002570"
updated_bokeh_plot.xaxis.axis_line_width = 2
updated_bokeh_plot.yaxis.axis_line_width = 2
updated_bokeh_plot.xaxis.major_label_orientation = 'vertical'
updated_bokeh_plot.yaxis.major_label_orientation = 'horizontal'
updated_bokeh_plot.yaxis.axis_label_text_font_size = '10pt'
updated_bokeh_plot.xgrid.grid_line_color = None
updated_bokeh_plot.ygrid.grid_line_color = '#F2F2F2'
updated_bokeh_plot.ygrid.grid_line_dash = [6, 4]
updated_bokeh_plot.y_range = Range1d(start=min_y_bound14bis_2, end=max_y_bound14bis_2)
updated_bokeh_plot.toolbar.logo = None
updated_bokeh_plot.legend.label_text_font_size = '8pt' # Set the font size of legend text
# Add custom formatted title
updated_bokeh_plot.add_layout(Title(
text="Financial KPI",
align='center',
text_font_size='10pt', # Adjust font size for the title
text_color="#002570" # Adjust color for the title
), 'above')
# Format the y-axis ticks in thousands with a dollar sign
updated_bokeh_plot.yaxis.formatter =CustomJSTickFormatter(code="""
return '$' + (tick / 1000).toFixed(0) + 'k';
""")
#Format the y-label to display on the graph
melted_df['formatted_labels'] = melted_df['Quantity Value'].apply(lambda x: f"${x / 1000:.0f}k")
# Add labels on top of the bars
source = ColumnDataSource(melted_df)
labels = LabelSet(
x= 'x_combined',
y='y_position',
#x= 'Quantity Type',
#y='Quantity Value',
#text='Quantity Value',
text='formatted_labels',
level='glyph',
source=source,
text_font_size='8pt',
text_font_style='bold', # Set the font style to bold
text_align='center',
#text_baseline='bottom', # Place labels above the bars #09/11
#y_offset=5, #09/11
text_color={'field': 'Quantity Type', 'transform': CategoricalColorMapper(
factors=unique_quantity_types, palette=[custom_palette14bis_2[qtype] for qtype in unique_quantity_types]
)}
)
updated_bokeh_plot.add_layout(labels)
return updated_bokeh_plot
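# --- Hedged sketch (not used by the plots above): a generalized alternative to the gap heuristic in
# calculate_x_combined. It centres the num_types bars of each 'Pty Indice' around the group index,
# which should hold for any number of selected Pty Indices. Assumes each group spans 'group_width'
# units on the x-axis; the exact spacing produced by hvplot may still differ.
def dodge_positions(group_index, type_code, num_types, group_width=0.6):
    # Offset of bar 'type_code' (0-based) relative to the centre of its group
    offset = (type_code - (num_types - 1) / 2) * (group_width / num_types)
    return group_index + offset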
##################################################################################################################################################################################################
# CODE NOT USED IN THE DASHBOARD -->> Replaced by create_plot_14bis_2 (09/23) using the newly created 'IDD AVG realized sales price [USD]' and 'IDD AVG realized Margin Standard [USD]' columns in df_Snapshot
##################################################################################################################################################################################################
def create_plot_14bis():
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
mask = pd.Series(True, index=pivot_table_14_2.index)
if selected_program != 'All':
mask &= (pivot_table_14_2['Program'] == selected_program)
if selected_priority != 'All':
mask &= (pivot_table_14_2['Priority'] == selected_priority)
if selected_indice != 'All':
mask &= (pivot_table_14_2['Pty Indice'] == selected_indice)
filtered_df = pivot_table_14_2.loc[mask].copy()
#####################################################################
# Dynamic y bounds for create_plot_14bis
#####################################################################
if filtered_df.empty:
return create_placeholder_plot('No data available for the selected filters.')
max_quantity_value14bis = filtered_df[['IDD Expected Total Sales', 'IDD Expected Total Margin', 'IDD Current Sales (Total)', 'IDD Current Margin (Total)']].max().max()
min_quantity_value14bis = filtered_df[['IDD Expected Total Sales', 'IDD Expected Total Margin', 'IDD Current Sales (Total)', 'IDD Current Margin (Total)']].min().min()
max_y_bound14bis = max_quantity_value14bis * 2
min_y_bound14bis = min_quantity_value14bis * 2 if min_quantity_value14bis < 0 else 0
melted_df = filtered_df.melt(id_vars=['Pty Indice'],
value_vars=['IDD Expected Total Sales', 'IDD Expected Total Margin', 'IDD Current Sales (Total)', 'IDD Current Margin (Total)'],
var_name='Quantity Type', value_name='Quantity Value')
# Add a column with formatted values in thousands with '$Xk' prefix and rounded to whole numbers
#melted_df['Formatted Value'] = melted_df['Quantity Value'].apply(lambda x: f"${x / 1000:,.0f}k")
#############################################
# Set Order of melted_df --> Order of y label
#############################################
# Define the order of categories for 'Quantity Type'
unique_quantity_types = ['IDD Current Margin (Total)', 'IDD Current Sales (Total)', 'IDD Expected Total Margin', 'IDD Expected Total Sales']
# Convert 'Quantity Type' to a categorical type with the defined order
melted_df['Quantity Type'] = pd.Categorical(melted_df['Quantity Type'], categories=unique_quantity_types, ordered=True)
####################################################
# Define unique indices and calculate x_combined
##################################################
# New code 08/07
# Define constants
unique_indices = melted_df['Pty Indice'].astype('category').cat.categories
unique_quantity_types = melted_df['Quantity Type'].astype('category').cat.categories
num_types = len(unique_quantity_types) # Number of bars per 'Pty Indice'
num_indice = len(unique_indices) # Number of selected 'Pty Indice'
# Generate base_positions based on enumerate(unique_indices)
base_positions = {indice: i * (num_indice + 1) for i, indice in enumerate(unique_indices)}
# Define gaps -- The gap is supposed to change with num_indice (for num_indice = 4, gap = 0.5 works well)
#gap = 0.5
gap = 1/(num_indice/2)
# Create a mapping of 'Pty Indice' to its index, starting from 0
indice_mapping = {indice: i for i, indice in enumerate(unique_indices)}
# Calculate x_combined for the bar positions
def calculate_x_combined(row):
pty_indice = row['Pty Indice']
quantity_type_code = melted_df['Quantity Type'].cat.codes[row.name]
# Get the index of the current 'Pty Indice'
indice = indice_mapping[pty_indice]
x_combined = base_positions[pty_indice] + quantity_type_code + 1/(num_indice + 1) + gap*indice #Calculation of the gap is yet to be refined as it does not work for all cases when Pty indice > 4
return x_combined
# Apply the function to calculate x_combined
melted_df['x_combined'] = melted_df.apply(calculate_x_combined, axis=1)
#####################################################
#New 09/11 - Calculate Y position
# Calculate the y_offset dynamically based on the 'Quantity Value'
#melted_df['y_position'] = melted_df['Quantity Value'] + melted_df['Quantity Value']*0.1
# Compute the maximum value of Quantity Value
max_quantity_value = melted_df['Quantity Value'].max()
# Calculate the 5% offset of the maximum value
offset = max_quantity_value * 0.05
# Define the function to calculate y_position with the conditional offset
def calculate_y_position(quantity_value):
if quantity_value >= 0:
return quantity_value + offset
else:
return offset # For negative bars, keep the label just above the zero line
# Apply the function to the DataFrame
melted_df['y_position'] = melted_df['Quantity Value'].apply(calculate_y_position)
#print('melted_df')
#display(melted_df)
plot = melted_df.hvplot.bar(
x='Pty Indice',
y='Quantity Value',
by='Quantity Type',
color='Quantity Type',
cmap=custom_palette14bis,
#title='IDD Total Sales & IDD Marge per Pty Indice by Top-Level Status, Production Status & Product Category',
xlabel='Pty Indice',
ylabel='[K$]',
legend='top_right',
stacked=False,
bar_width=0.6, # Set bar width - 09/12
#padding=1,
tools=[],
).opts(
xrotation=90,
)
updated_bokeh_plot = hv.render(plot, backend='bokeh')
updated_bokeh_plot.tools = [tool for tool in updated_bokeh_plot.tools if not isinstance(tool, HoverTool)]
hover = HoverTool()
hover.tooltips = [
("Pty Indice", "@Pty_Indice"),
("KPI", "@color"),
("Quantity Value", "@Quantity_Value{($0,0k)}") # Format values: thousands with 'K' # Quantity_Value with the '_' otherwise that does not work!
]
updated_bokeh_plot.add_tools(hover)
# Remove wheel zoom from active tools if you want it inactive by default - 08/12
#updated_bokeh_plot.tools = [tool for tool in updated_bokeh_plot.tools if not isinstance(tool, WheelZoomTool)]
# 09/12 - Set wheel zoom inactive
updated_bokeh_plot.toolbar.active_scroll = None
updated_bokeh_plot.xaxis.major_label_text_font_size = '0pt'
updated_bokeh_plot.yaxis.major_label_text_font_size = '10pt'
#updated_bokeh_plot.title.text_font_size = '8pt'
#updated_bokeh_plot.title.text_color = "#002570"
updated_bokeh_plot.xaxis.axis_line_width = 2
updated_bokeh_plot.yaxis.axis_line_width = 2
updated_bokeh_plot.xaxis.major_label_orientation = 'vertical'
updated_bokeh_plot.yaxis.major_label_orientation = 'horizontal'
updated_bokeh_plot.yaxis.axis_label_text_font_size = '10pt'
updated_bokeh_plot.xgrid.grid_line_color = None
updated_bokeh_plot.ygrid.grid_line_color = '#F2F2F2'
updated_bokeh_plot.ygrid.grid_line_dash = [6, 4]
updated_bokeh_plot.y_range = Range1d(start=min_y_bound14bis, end=max_y_bound14bis)
updated_bokeh_plot.toolbar.logo = None
updated_bokeh_plot.legend.label_text_font_size = '8pt' # Set the font size of legend text
# Add custom formatted title
updated_bokeh_plot.add_layout(Title(
text="Financial KPI",
align='center',
text_font_size='10pt', # Adjust font size for the title
text_color="#002570" # Adjust color for the title
), 'above')
# Format the y-axis ticks in thousands with a dollar sign
updated_bokeh_plot.yaxis.formatter =CustomJSTickFormatter(code="""
return '$' + (tick / 1000).toFixed(0) + 'k';
""")
#Format the y-label to display on the graph
melted_df['formatted_labels'] = melted_df['Quantity Value'].apply(lambda x: f"${x / 1000:.0f}k")
# Add labels on top of the bars
source = ColumnDataSource(melted_df)
labels = LabelSet(
x= 'x_combined',
y='y_position',
#x= 'Quantity Type',
#y='Quantity Value',
#text='Quantity Value',
text='formatted_labels',
level='glyph',
source=source,
text_font_size='8pt',
text_font_style='bold', # Set the font style to bold
text_align='center',
#text_baseline='bottom', # Place labels above the bars #09/11
#y_offset=5, #09/11
text_color={'field': 'Quantity Type', 'transform': CategoricalColorMapper(
factors=unique_quantity_types, palette=[custom_palette14bis[qtype] for qtype in unique_quantity_types]
)}
)
updated_bokeh_plot.add_layout(labels)
return updated_bokeh_plot
#//////////////////////////////////////////////////
###################################################
# create_plot_14bbis
###################################################
def create_plot_14bbis():
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
mask = pd.Series(True, index=pivot_table_14_2.index)
if selected_program != 'All':
mask &= (pivot_table_14_2['Program'] == selected_program)
if selected_priority != 'All':
mask &= (pivot_table_14_2['Priority'] == selected_priority)
if selected_indice != 'All':
mask &= (pivot_table_14_2['Pty Indice'] == selected_indice)
filtered_df = pivot_table_14_2.loc[mask].copy()
#####################################################################
# Dynamic y bounds for create_plot_14bbis
#####################################################################
if filtered_df.empty:
return create_placeholder_plot('No data available for the selected filters.')
max_margin_value14bbis = filtered_df[['IDD Current Margin (%)', '% Completion', '% DPAS Order', 'IDD Expected ROI (Total)']].max().max()
min_margin_value14bbis = filtered_df[['IDD Current Margin (%)', '% Completion', '% DPAS Order', 'IDD Expected ROI (Total)']].min().min()
max_y_bound14bbis = max_margin_value14bbis * 2
# Double the min value if it is negative, otherwise set the lower bound to 0
min_y_bound14bbis = min_margin_value14bbis * 2 if min_margin_value14bbis < 0 else 0
melted_df = filtered_df.melt(id_vars=['Pty Indice'],
value_vars=['IDD Current Margin (%)', '% Completion', '% DPAS Order', 'IDD Expected ROI (Total)'],
var_name='Quantity Type', value_name='Quantity Value')
#############################################
# Set Order of melted_df --> Order of y label
#############################################
# Define the order of categories for 'Quantity Type'
unique_quantity_types = ['IDD Expected ROI (Total)', '% DPAS Order', '% Completion', 'IDD Current Margin (%)']
# Convert 'Quantity Type' to a categorical type with the defined order
melted_df['Quantity Type'] = pd.Categorical(melted_df['Quantity Type'], categories=unique_quantity_types, ordered=True)
####################################################
# Define unique indices and calculate x_combined
##################################################
# New code 08/07
# Define constants
unique_indices = melted_df['Pty Indice'].astype('category').cat.categories
unique_quantity_types = melted_df['Quantity Type'].astype('category').cat.categories
num_types = len(unique_quantity_types) # Number of bars per 'Pty Indice'
num_indice = len(unique_indices) # Number of selected 'Pty Indice'
# Generate base_positions based on enumerate(unique_indices)
base_positions = {indice: i * (num_indice + 1) for i, indice in enumerate(unique_indices)}
# Define gaps -- The gap is supposed to change with num_indice (for num_indice = 4, gap = 0.5 works well)
#gap = 0.5
gap = 1/(num_indice/2)
# Create a mapping of 'Pty Indice' to its index, starting from 0
indice_mapping = {indice: i for i, indice in enumerate(unique_indices)}
# Calculate x_combined for the bar positions
def calculate_x_combined(row):
pty_indice = row['Pty Indice']
quantity_type_code = melted_df['Quantity Type'].cat.codes[row.name]
# Get the index of the current 'Pty Indice'
indice = indice_mapping[pty_indice]
x_combined = base_positions[pty_indice] + quantity_type_code + 1/(num_indice + 1) + gap*indice #Calculation of the gap is yet to be refined as it does not work for all cases when Pty indice > 4
return x_combined
# Apply the function to calculate x_combined
melted_df['x_combined'] = melted_df.apply(calculate_x_combined, axis=1)
#####################################################
#New 09/11 - Calculate Y position
# Compute the maximum value of Quantity Value
max_quantity_value = melted_df['Quantity Value'].max()
# Calculate the 5% offset of the maximum value
offset = max_quantity_value * 0.05
# Define the function to calculate y_position with the conditional offset
def calculate_y_position(quantity_value):
if quantity_value >= 0:
return quantity_value + offset
else:
return offset # For negative bars, keep the label just above the zero line
# Apply the function to the DataFrame
melted_df['y_position'] = melted_df['Quantity Value'].apply(calculate_y_position)
plot = melted_df.hvplot.bar(
x='Pty Indice',
y='Quantity Value',
by='Quantity Type',
color='Quantity Type',
cmap=custom_palette14bbis,
#title='IDD % Margin per Pty Indice by Top-Level Status, Production Status & Product Category',
xlabel='Pty Indice',
ylabel='IDD % Margin',
legend='top_right',
stacked=False,
bar_width=0.6, # Set bar width - 09/12
#padding=0.1,
tools=[],
).opts(
xrotation=90,
)
updated_bokeh_plot = hv.render(plot, backend='bokeh')
updated_bokeh_plot.tools = [tool for tool in updated_bokeh_plot.tools if not isinstance(tool, HoverTool)]
hover = HoverTool()
hover.tooltips = [
("Pty Indice", "@Pty_Indice"),
("KPI", "@color"),
("Value", "@Quantity_Value%"), # 08/09
]
updated_bokeh_plot.add_tools(hover)
updated_bokeh_plot.xaxis.major_label_text_font_size = '0pt'
updated_bokeh_plot.yaxis.major_label_text_font_size = '10pt'
#updated_bokeh_plot.title.text_font_size = '8pt'
#updated_bokeh_plot.title.text_color = "#002570"
updated_bokeh_plot.xaxis.axis_line_width = 2
updated_bokeh_plot.yaxis.axis_line_width = 2
updated_bokeh_plot.xaxis.major_label_orientation = 'vertical'
updated_bokeh_plot.yaxis.major_label_orientation = 'horizontal'
updated_bokeh_plot.yaxis.axis_label_text_font_size = '10pt'
updated_bokeh_plot.xgrid.grid_line_color = None
updated_bokeh_plot.ygrid.grid_line_color = '#F2F2F2'
updated_bokeh_plot.ygrid.grid_line_dash = [6, 4]
updated_bokeh_plot.y_range = Range1d(start=min_y_bound14bbis, end=max_y_bound14bbis)
updated_bokeh_plot.toolbar.logo = None
updated_bokeh_plot.legend.label_text_font_size = '8pt' # Set the font size of legend text
# Add custom formatted title
updated_bokeh_plot.add_layout(Title(
text="IDD % Margin, % Completion, % DPAS Order \n& Expected ROI per Pty Indice",
align='center',
text_font_size='10pt', # Adjust font size for the title
text_color="#002570" # Adjust color for the title
), 'above')
# Format y-axis ticks as percentages
updated_bokeh_plot.yaxis.formatter =CustomJSTickFormatter(code="""
return (tick).toFixed(0) + '%';
""")
# Format labels to include percentage sign
melted_df['formatted_labels'] = melted_df['Quantity Value'].apply(lambda x: f"{x:.0f}%")
# Add labels on top of the bars
source = ColumnDataSource(melted_df)
labels = LabelSet(
x= 'x_combined',
y = 'y_position', # Use y_position for the vertical position of the labels
#x= 'Quantity Type',
#y='Quantity Value',
#text='Quantity Value',
text='formatted_labels',
level='glyph',
source=source,
text_font_size='8pt',
text_font_style='bold', # Set the font style to bold
text_align='center',
#text_baseline='bottom', # Place labels above the bars
#text_baseline='text_baseline', # Use text_baseline for dynamic alignment #09/11 not working
#y_offset=5,
#y_offset='y_offset', #09/11 not working
text_color={'field': 'Quantity Type', 'transform': CategoricalColorMapper(
factors=unique_quantity_types, palette=[custom_palette14bbis[qtype] for qtype in unique_quantity_types]
)}
)
updated_bokeh_plot.add_layout(labels)
return updated_bokeh_plot
###############
# Initial call
###############
plot_pane_14bis = pn.pane.Bokeh(create_plot_14bis())
plot_pane_14bis_2 = pn.pane.Bokeh(create_plot_14bis_2())
plot_pane_14bbis = pn.pane.Bokeh(create_plot_14bbis())
##############################################################
# Update methods to include messages when no data is available
###############################################################
def update_plot_14bis(event):
plot_pane_14bis.object = create_plot_14bis()
def update_plot_14bis_2(event):
plot_pane_14bis_2.object = create_plot_14bis_2()
def update_plot_14bbis(event):
plot_pane_14bbis.object = create_plot_14bbis()
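# Note: these update callbacks are presumably registered against the filter widgets elsewhere in the
# dashboard, following the same pattern used for the table panes below, e.g.:
#   program_widget.param.watch(update_plot_14bis_2, 'value')
#   priority_widget.param.watch(update_plot_14bis_2, 'value')
#   indice_widget.param.watch(update_plot_14bis_2, 'value')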
#New 08/28
#######################################################################################
# Create plot_15bis based on pivot_table_15, with production_table_pane attached to it
#######################################################################################
# Custom color palette for the new plot
custom_palette15bis = {
"Standard Time (Routing, full ASSY)": "#6699FF", # Blue for Standard Time
"Actual Time (AVG Prod, full ASSY)": "#A2C075", # Green for actual time
"Standard Deviation (on Actual Time, full ASSY)": "#FF5733", # Orange for standard deviation
"Actual Time (AVG Prod, Top-Level only)": "#63BE7B", # bleu
}
#//////////////////////////////////////////////////
###################################################
# create_plot_15bis
###################################################
def create_plot_15bis():
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
mask = pd.Series(True, index=pivot_table_15.index)
if selected_program != 'All':
mask &= (pivot_table_15['Program'] == selected_program)
if selected_priority != 'All':
mask &= (pivot_table_15['Priority'] == selected_priority)
if selected_indice != 'All':
mask &= (pivot_table_15['Pty Indice'] == selected_indice)
filtered_df = pivot_table_15.loc[mask].copy()
if filtered_df.empty:
return create_placeholder_plot('No data available for the selected filters.')
#####################################################################
# Dynamic y bounds for create_plot_15bis
#####################################################################
max_time_value15bis = filtered_df[['Standard Time (Routing, full ASSY)', 'Actual Time (AVG Prod, full ASSY)', 'Standard Deviation (on Actual Time, full ASSY)', 'Actual Time (AVG Prod, Top-Level only)']].max().max()
min_time_value15bis = filtered_df[['Standard Time (Routing, full ASSY)', 'Actual Time (AVG Prod, full ASSY)', 'Standard Deviation (on Actual Time, full ASSY)', 'Actual Time (AVG Prod, Top-Level only)']].min().min()
max_y_bound15bis = max_time_value15bis * 2
min_y_bound15bis = min_time_value15bis * 2 if min_time_value15bis < 0 else 0
# Melt the dataframe to reshape it for the plot
melted_df = filtered_df.melt(
id_vars=['Pty Indice'],
#value_vars=['Standard Time (Routing, full ASSY)', 'Actual Time (AVG Prod, full ASSY)', 'Standard Deviation (on Actual Time, full ASSY)', 'Actual Time (AVG Prod, Top-Level only)'],
value_vars=['Standard Time (Routing, full ASSY)', 'Actual Time (AVG Prod, full ASSY)', 'Standard Deviation (on Actual Time, full ASSY)'], # Without 'Actual Time (AVG Prod, Top-Level only)'
var_name='Time Type',
value_name='Time Value'
)
#############################################
# Set Order of melted_df --> Order of y label
#############################################
# Define the order for 'Time Type'
#unique_time_types = ['Actual Time (AVG Prod, Top-Level only)', 'Standard Deviation (on Actual Time, full ASSY)' , 'Actual Time (AVG Prod, full ASSY)', 'Standard Time (Routing, full ASSY)']
unique_time_types = ['Standard Deviation (on Actual Time, full ASSY)' , 'Actual Time (AVG Prod, full ASSY)', 'Standard Time (Routing, full ASSY)'] # Without 'Actual Time (AVG Prod, Top-Level only)'
melted_df['Time Type'] = pd.Categorical(melted_df['Time Type'], categories=unique_time_types, ordered=True)
####################################################
# Define unique indices and calculate x_combined
##################################################
# New code 08/07
# Define constants
unique_indices = melted_df['Pty Indice'].astype('category').cat.categories
unique_time_types = melted_df['Time Type'].astype('category').cat.categories
num_types = len(unique_time_types) # Number of bars per 'Pty Indice'
num_indice = len(unique_indices) # Number of selected 'Pty Indice'
# Generate base_positions based on enumerate(unique_indices)
base_positions = {indice: i * (num_indice + 1) for i, indice in enumerate(unique_indices)}
# Define gaps -- The gap is supposed to change with num_indice (for num_indice = 4, gap = 0.5 works well)
#gap = 0.5
gap = 1/(num_indice/2)
# Create a mapping of 'Pty Indice' to its index, starting from 0
indice_mapping = {indice: i for i, indice in enumerate(unique_indices)}
# Calculate x_combined for the bar positions
def calculate_x_combined(row):
pty_indice = row['Pty Indice']
time_type_code = melted_df['Time Type'].cat.codes[row.name]
# Get the index of the current 'Pty Indice'
indice = indice_mapping[pty_indice]
x_combined = base_positions[pty_indice] + time_type_code + 1/(num_indice + 1) + gap*indice #Calculation of the gap is yet to be refined as it does not work for all cases when Pty indice > 4
return x_combined
# Apply the function to calculate x_combined
melted_df['x_combined'] = melted_df.apply(calculate_x_combined, axis=1)
#####################################################
# Create the plot for Graph 15bis
plot_15bis = melted_df.hvplot.bar(
x='Pty Indice',
y='Time Value',
by='Time Type',
color='Time Type',
#title='Standard Time VS Actual Time',
xlabel='Pty Indice',
ylabel='Time [hours]',
cmap=custom_palette15bis,
legend='top_right',
stacked=False,
bar_width=0.6, # Set bar width - 09/12
#padding=1,
tools=[],
).opts(xrotation=90)
# Customize the Bokeh plot
bokeh_plot_15bis = hv.render(plot_15bis, backend='bokeh')
bokeh_plot_15bis.tools = [tool for tool in bokeh_plot_15bis.tools if not isinstance(tool, HoverTool)]
hover = HoverTool()
hover.tooltips = [
("Pty Indice", "@Pty_Indice"),
("Time Type", "@color"),
("Time Value", "@Time_Value{0.0} hours"),
]
bokeh_plot_15bis.add_tools(hover)
# 09/12 - Set wheel zoom inactive
bokeh_plot_15bis.toolbar.active_scroll = None
# Further customizations
bokeh_plot_15bis.xaxis.major_label_text_font_size = '0pt'
bokeh_plot_15bis.yaxis.major_label_text_font_size = '10pt'
bokeh_plot_15bis.title.text_font_size = '10pt'
bokeh_plot_15bis.title.text_color = "#002570"
bokeh_plot_15bis.xaxis.axis_line_width = 2
bokeh_plot_15bis.yaxis.axis_line_width = 2
bokeh_plot_15bis.xaxis.major_label_orientation = 'vertical'
bokeh_plot_15bis.yaxis.major_label_orientation = 'horizontal'
bokeh_plot_15bis.yaxis.axis_label_text_font_size = '10pt'
bokeh_plot_15bis.xgrid.grid_line_color = None
bokeh_plot_15bis.ygrid.grid_line_color = '#F2F2F2'
bokeh_plot_15bis.ygrid.grid_line_dash = [6, 4]
bokeh_plot_15bis.toolbar.logo = None
bokeh_plot_15bis.y_range = Range1d(start=min_y_bound15bis, end=max_y_bound15bis)
bokeh_plot_15bis.legend.label_text_font_size = '8pt'
# Add custom formatted title
bokeh_plot_15bis.add_layout(Title(
text="Production KPI",
align='center',
text_font_size='10pt',
text_color="#002570"
), 'above')
# Add labels on top of the bars
source = ColumnDataSource(melted_df)
labels = LabelSet(
x='x_combined',
y='Time Value',
text='Time Value',
level='glyph',
source=source,
text_font_size='8pt',
text_font_style='bold', # Set the font style to bold
text_align='center',
text_baseline='bottom',
y_offset=5,
text_color={'field': 'Time Type', 'transform': CategoricalColorMapper(
factors=unique_time_types, palette=[custom_palette15bis[ttype] for ttype in unique_time_types]
)}
)
bokeh_plot_15bis.add_layout(labels)
return bokeh_plot_15bis
# Initial call
plot_pane_15bis = pn.pane.Bokeh(create_plot_15bis())
# Update method
def update_plot_15bis(event):
plot_pane_15bis.object = create_plot_15bis()
# 08/29
#///////////////////////////////////////
########################################
# Create table related to Graph-15
########################################
#///////////////////////////////////////
# Update 09/10 WIP --> Include 'Total Top-Level Qty' and 'Total Components Qty' in the table: 'Top-Level WO Count'*'Qty per WO' and 'Total WO Count'*'Total Components Qty'
# Table containing 'Pty Indice', 'Total WO Count' and 'Top-Level WO Count' based on the widget (Program, Priority, Pty Indice) selection of |Products Status|
pivot_table_15_2 = pivot_table_15.copy()
def create_production_table_by_pty_indice(df):
return df[['Pty Indice', 'Total WO Count', 'Top-Level WO Count', 'Total Top-Level Qty', 'Total sub-Level Qty']]
# Define the function to update the production table based on widget values
def update_production_table_by_pty_indice(event=None):
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
mask = pd.Series(True, index=pivot_table_15_2.index)
if selected_program != 'All':
mask &= (pivot_table_15_2['Program'] == selected_program)
if selected_priority != 'All':
mask &= (pivot_table_15_2['Priority'] == selected_priority)
if selected_indice != 'All':
mask &= (pivot_table_15_2['Pty Indice'] == selected_indice)
filtered_df = pivot_table_15_2.loc[mask].copy()
if filtered_df.empty:
updated_table = create_placeholder_plot('No data available for the selected filters.')
else:
updated_table = create_production_table_by_pty_indice(filtered_df)
# Update the production table pane
production_table_by_pty_indice_pane.object = updated_table
# Initialize the production table with default values
initial_production_table_by_pty_indice = create_production_table_by_pty_indice(pivot_table_15_2)
production_table_by_pty_indice_pane = pn.pane.DataFrame(initial_production_table_by_pty_indice, width=420, index=False) #09/10
# Initial table setup
update_production_table_by_pty_indice()
# Attach the update function to widget value changes
program_widget.param.watch(update_production_table_by_pty_indice, 'value')
priority_widget.param.watch(update_production_table_by_pty_indice, 'value')
indice_widget.param.watch(update_production_table_by_pty_indice, 'value')
##################################################################################################################
# 09/25 - Create a second pandas DataFrame related to Graph 15, with data from df_Snapshot related to the Production KPI
##################################################################################################################
# The table should contain 'Pty Indice', 'Actual vs Standard time [%]', 'Deviation vs Actual [%]'
# Apply color formatting on 'Deviation vs Actual [%]': green if < 30%, orange if 30 to 50%, and red if > 50%
# Step 1: Create a copy of df_Snapshot and rename the columns
df_Snapshot_prod_KPI = df_Snapshot.copy()
df_Snapshot_prod_KPI.rename(columns={
'Actual vs Standard time [%]': 'Standard time to Actual time [%]',
}, inplace=True)
# Need to keep 'Priority', 'Program' for the widget to work
relevant_columns_KPI = ['Pty Indice', 'Priority', 'Program',
'Standard time to Actual time [%]', 'Deviation vs Actual [%]']
df_Snapshot_prod_KPI = df_Snapshot_prod_KPI[relevant_columns_KPI]
# Convert percentage strings to float in numeric DataFrame
def convert_percentage_columns(df, percentage_columns):
for col in percentage_columns:
df[col] = (
df[col]
.str.replace('%', '', regex=False)
.astype(float) / 100
)
return df
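# --- Hedged sketch (not wired in): convert_percentage_columns assumes the cells are strings such as
# '45%'; if some cells already hold numeric values, .str.replace would fail. A more defensive variant
# could look like this (treating numeric cells as already-converted fractions is an assumption):
def convert_percentage_columns_safe(df, percentage_columns):
    def _to_fraction(val):
        if isinstance(val, str):
            return float(val.replace('%', '').strip()) / 100
        return val  # assumed to already be a fraction (or NaN)
    for col in percentage_columns:
        df[col] = df[col].apply(_to_fraction)
    return df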
# Step 3: Create a numeric DataFrame for percentage calculations
df_Snapshot_KPI_numeric = df_Snapshot_prod_KPI.copy()
percentage_columns_KPI = ['Standard time to Actual time [%]', 'Deviation vs Actual [%]']
df_Snapshot_KPI_numeric = convert_percentage_columns(df_Snapshot_KPI_numeric, percentage_columns_KPI)
# Step 4: Function to create a color dictionary
def create_color_dictionary(df, column):
color_dict = {}
for value in df[column]:
try:
# Handle both string and float types
if isinstance(value, str):
num_value = float(value.replace('%', '')) / 100
else:
num_value = value # Assume it's already a float
# Determine color based on value ranges
if num_value < 0.3:
color_dict[num_value] = 'green'
elif 0.3 <= num_value < 0.5:
color_dict[num_value] = 'orange'
else:
color_dict[num_value] = 'red'
except ValueError:
color_dict[value] = 'black' # Handle non-convertible values
return color_dict
# Create a color dictionary for 'Deviation vs Actual [%]'
color_mapping = create_color_dictionary(df_Snapshot_prod_KPI, 'Deviation vs Actual [%]')
# Function to apply color formatting to the DataFrame
def apply_color_formatting_prod_KPI(df, color_dict, column):
"""Apply conditional color formatting to a specified column in the DataFrame."""
def color_deviation(val):
"""Return the corresponding color based on the value."""
try:
# Handle both string and float types
if isinstance(val, str):
num_value = float(val.replace('%', '')) / 100
else:
num_value = val # Assume it's already a float
color = color_dict.get(num_value, 'black') # Default to black if not found
except ValueError:
color = 'black' # Handle conversion failure
return f'color: {color}'
# Create a styled DataFrame with color formatting
styled_df = df.style.applymap(color_deviation, subset=[column])
# Center text in both header and cells
styled_df.set_table_styles(
[
{'selector': 'th', 'props': [('text-align', 'center')]}, # Center headers
{'selector': 'td', 'props': [('text-align', 'center')]} # Center cells
]
)
return styled_df
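# --- Hedged sketch (not wired in): the same <30% / 30-50% / >=50% colour rule can be computed inline,
# without the intermediate colour dictionary keyed by value. Shown only as a design alternative to
# apply_color_formatting_prod_KPI above.
def color_deviation_inline(val):
    try:
        num_value = float(val.replace('%', '')) / 100 if isinstance(val, str) else float(val)
    except (TypeError, ValueError):
        return 'color: black'
    if num_value < 0.3:
        return 'color: green'
    if num_value < 0.5:
        return 'color: orange'
    return 'color: red'
# Possible usage: df_display.style.applymap(color_deviation_inline, subset=['Deviation vs Actual [%]'])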
# Prepare the display DataFrame
df_display = df_Snapshot_prod_KPI.copy()
# Apply the conditional color formatting to the 'Deviation vs Actual [%]' column
styled_df = apply_color_formatting_prod_KPI(df_display, color_mapping, 'Deviation vs Actual [%]')
# Update the KPI table function
def update_kpi_table_prod(event):
"""Update the KPI table based on selected filters."""
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
# Create a mask for filtering
mask = pd.Series(True, index=df_Snapshot_prod_KPI.index)
if selected_program != 'All':
mask &= (df_Snapshot_prod_KPI['Program'] == selected_program)
if selected_priority != 'All':
mask &= (df_Snapshot_prod_KPI['Priority'] == selected_priority)
if selected_indice != 'All':
mask &= (df_Snapshot_prod_KPI['Pty Indice'] == selected_indice)
# Filter the DataFrame
filtered_df = df_Snapshot_prod_KPI.loc[mask].copy().reset_index(drop=True)
# Drop 'Priority' and 'Program' from the filtered DataFrame for display
filtered_df_display = filtered_df.drop(columns=['Priority', 'Program'])
# Apply the conditional color formatting to the 'Deviation vs Actual [%]' column
filtered_styled_df = apply_color_formatting_prod_KPI(filtered_df_display, color_mapping, 'Deviation vs Actual [%]').hide(axis='index')
# Update the KPI table pane
kpi_table_pane_prod.object = filtered_styled_df
# Create the initial styled DataFrame without index
def create_styled_dataframe(df):
"""Create a styled DataFrame without the index."""
return apply_color_formatting_prod_KPI(df, color_mapping, 'Deviation vs Actual [%]').hide(axis='index')
# Initialize the KPI table pane
kpi_table_pane_prod = pn.pane.DataFrame(
create_styled_dataframe(df_Snapshot_prod_KPI.copy().reset_index(drop=True)), # Create a styled DataFrame
width=420,
)
# Initialize update_kpi_table_prod
update_kpi_table_prod(None)
# Attach update function to widget changes
program_widget.param.watch(update_kpi_table_prod, 'value')
priority_widget.param.watch(update_kpi_table_prod, 'value')
indice_widget.param.watch(update_kpi_table_prod, 'value')
#####################################################################################
# 09/24 - Create a pandas DataFrame to summarize the production KPI from df_Snapshot
#####################################################################################
# First rename 'Critical Qty' to 'Critical Qty (Initial)' and 'IDD Production Cost (unit)' to 'IDD current cost (per unit)'
# The table_production_KPI contains: 'Pty Indice', 'IDD Marge Standard (unit)', 'IDD Sale Price', 'IDD Current Margin (%)', 'IDD current cost (unit)', 'IDD AVG realized Margin [%]', 'IDD Corrected Margin [%]', 'Critical Qty Initial'
# Color the font dark green when 'IDD AVG realized Margin [%]', 'IDD Current Margin (%)' and 'IDD Corrected Margin [%]' are positive, and red when negative
# Make sure that the following columns are formatted as currency (USD): 'IDD Marge Standard (unit)', 'IDD Sale Price', 'IDD current cost (unit)'
# Make sure that the following columns are formatted as percentages (%): 'IDD AVG realized Margin [%]', 'IDD Current Margin (%)', 'IDD Corrected Margin [%]'
# Step 1: Create a copy of df_Snapshot and rename the columns
df_Snapshot_prod = df_Snapshot.copy()
df_Snapshot_prod.rename(columns={
'Critical Qty': 'Critical Qty (Initial)',
'IDD Production Cost (unit)': 'IDD current cost (per unit)',
'IDD Marge Standard (unit)': 'IDD Margin Standard (per unit)',
'IDD Current Margin (%)': 'IDD Current Margin [%]'
}, inplace=True)
# Step 2: Keep only the necessary columns
relevant_columns = ['Pty Indice', 'Priority', 'Program',
'IDD Margin Standard (per unit)', 'IDD Sale Price',
'IDD Current Margin [%]', 'IDD current cost (per unit)',
'IDD AVG realized Margin [%]', 'IDD Corrected Margin [%]',
'Critical Qty (Initial)']
df_Snapshot_prod = df_Snapshot_prod[relevant_columns]
# Step 3: Create a numeric DataFrame for percentage calculations
df_Snapshot_numeric = df_Snapshot_prod.copy()
percentage_columns = ['IDD Current Margin [%]', 'IDD AVG realized Margin [%]', 'IDD Corrected Margin [%]']
# Convert percentage strings to float in numeric DataFrame
def convert_percentage_columns(df, percentage_columns):
for col in percentage_columns:
df[col] = (
df[col]
.str.replace('%', '', regex=False)
.astype(float) / 100
)
return df
df_Snapshot_numeric = convert_percentage_columns(df_Snapshot_numeric, percentage_columns)
# --> WIP 10/07 <--
# Step 4: Create a color mapping for percentage columns
def create_color_mapping_percentage(df, columns):
"""Create a color mapping for multiple percentage columns based on value ranges."""
color_dict = {}
for column in columns:
for idx, value in df[column].items():
try:
num_value = float(value) # Assume value is already a float
# Define the color based on value ranges
if num_value > 0:
color_dict[(idx, column)] = 'green'
elif num_value < 0:
color_dict[(idx, column)] = 'red'
else:
color_dict[(idx, column)] = 'black'
except ValueError:
color_dict[(idx, column)] = 'black' # Fallback for non-convertible values
return color_dict
# Create color mappings for the relevant percentage columns
percentage_columns = ['IDD Current Margin [%]', 'IDD AVG realized Margin [%]', 'IDD Corrected Margin [%]']
color_mapping_margin = create_color_mapping_percentage(df_Snapshot_numeric, percentage_columns)
# Step 5: Function to apply color formatting
def apply_color_formatting_margin(df, color_dict):
"""Apply conditional color formatting to the percentage columns in the DataFrame."""
def color_deviation(val, idx, col):
"""Return the corresponding color based on the value."""
try:
# Get color for the (index, column) pair
color = color_dict.get((idx, col), 'black')
except ValueError:
color = 'black' # Handle conversion failure
return f'color: {color}'
# Create a styled DataFrame with color formatting for the relevant percentage columns
styled_df = df.style.apply(
lambda x: [color_deviation(x[col], x.name, col) for col in df.columns], axis=1
)
# Center text in both header and cells
styled_df.set_table_styles(
[
{'selector': 'th', 'props': [('text-align', 'center')]}, # Center headers
{'selector': 'td', 'props': [('text-align', 'center')]} # Center cells
]
)
return styled_df
# Step 6: Function to format currency values
def format_currency(value):
"""Format a number as currency with one decimal place."""
return f"${value:,.1f}"
# Update the KPI table function
def update_kpi_table(event):
"""Update the KPI table based on selected filters."""
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
# Create a mask for filtering
mask = pd.Series(True, index=df_Snapshot_prod.index)
if selected_program != 'All':
mask &= (df_Snapshot_prod['Program'] == selected_program)
if selected_priority != 'All':
mask &= (df_Snapshot_prod['Priority'] == selected_priority)
if selected_indice != 'All':
mask &= (df_Snapshot_prod['Pty Indice'] == selected_indice)
filtered_df = df_Snapshot_prod.loc[mask].copy().reset_index(drop=True)
filtered_numeric_df = df_Snapshot_numeric.loc[mask].copy().reset_index(drop=True)
# Apply currency formatting to the specified columns
currency_columns = ['IDD Margin Standard (per unit)', 'IDD Sale Price', 'IDD current cost (per unit)']
for col in currency_columns:
filtered_df[col] = filtered_df[col].apply(format_currency)
# Create a new color mapping for the filtered DataFrame
color_mapping_margin = create_color_mapping_percentage(filtered_numeric_df, percentage_columns)
# Apply the conditional color formatting to the relevant percentage columns
filtered_styled_df = apply_color_formatting_margin(filtered_df, color_mapping_margin).hide(axis='index')
# Update the KPI table pane
kpi_table_pane.object = filtered_styled_df
# Step 7: Create the initial styled DataFrame
def create_styled_dataframe(df):
"""Create a styled DataFrame without the index."""
return apply_color_formatting_margin(df, color_mapping_margin).hide(axis='index')
# Initialize the KPI table pane
kpi_table_pane = pn.pane.DataFrame(
create_styled_dataframe(df_Snapshot_prod.copy().reset_index(drop=True)), # Create a styled DataFrame
width=1250,
)
# Initialize and update KPI table
update_kpi_table(None)
# Attach update function to widget changes
program_widget.param.watch(update_kpi_table, 'value')
priority_widget.param.watch(update_kpi_table, 'value')
indice_widget.param.watch(update_kpi_table, 'value')
#####################################################################################
# 09/26 - Create a pandas DataFrame to summarize the backlog KPI from df_Snapshot
#####################################################################################
# Build a table with 5 columns: pivot_table_14['Pty Indice'], pivot_table_14['Priority'], pivot_table_14['Program'], pivot_table_14['% Completion'], pivot_table_14['% DPAS Order']
# Display only 'Pty Indice', % Completion' and '% DPAS Order'
# Function to format numeric values as percentages with one decimal point
def format_percentage(value):
"""Format a numeric value as a percentage with 1 decimal point."""
return "{:.1f}%".format(value) # No multiplication, directly format the value
def create_backlog_table():
"""Build a backlog table from a copy of pivot_table_14."""
# Create a copy of pivot_table_14 to avoid modifying the original DataFrame
backlog_table = pivot_table_14.copy()
# Select the relevant columns
backlog_table = backlog_table[['Pty Indice', 'Priority', 'Program', '% Completion', '% Completion Total Backlog', '% DPAS Order']]
# Rename '% Completion' to '% Completion Critical Qty'
backlog_table.rename(columns={'% Completion': '% Completion Critical Qty'}, inplace=True)
# Return the backlog table without additional formatting for calculations
return backlog_table
# Initialize the backlog_table globally to be accessible elsewhere
backlog_table = create_backlog_table()
def update_filtered_backlog_table(event):
"""Update the backlog table based on widget filters."""
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
# Re-create a copy of the backlog table for filtering
backlog_table = create_backlog_table()
# Apply filters based on the widget values
mask = pd.Series(True, index=backlog_table.index)
if selected_program != 'All':
mask &= (backlog_table['Program'] == selected_program)
if selected_priority != 'All':
mask &= (backlog_table['Priority'] == selected_priority)
if selected_indice != 'All':
mask &= (backlog_table['Pty Indice'] == selected_indice)
# Filter the backlog table based on the mask
filtered_backlog_table = backlog_table.loc[mask].reset_index(drop=True) # Reset index here
# Display only the columns: 'Pty Indice', '% Completion', '% DPAS Order'
display_columns = ['Pty Indice', '% Completion Total Backlog', '% Completion Critical Qty', '% DPAS Order']
filtered_display_table = filtered_backlog_table[display_columns].copy()
# Format the percentage columns for display with one decimal point
filtered_display_table['% Completion Critical Qty'] = filtered_display_table['% Completion Critical Qty'].apply(format_percentage)
filtered_display_table['% Completion Total Backlog'] = filtered_display_table['% Completion Total Backlog'].apply(format_percentage)
filtered_display_table['% DPAS Order'] = filtered_display_table['% DPAS Order'].apply(format_percentage)
# Create and style the DataFrame directly
styled_filtered_table = create_styled_dataframe(filtered_display_table)
# Update the Panel DataFrame pane
backlog_table_pane.object = styled_filtered_table
def create_styled_dataframe(df):
"""Create a styled DataFrame without the index and center values."""
# Create a styled DataFrame
styled_df = df.style.hide(axis='index') # Hide the index
# Set table styles for centering text in both headers and data cells
styled_df.set_table_styles(
[
{
'selector': 'th, td', # Select both headers and data cells
'props': [('text-align', 'center')] # Center text
}
],
axis=0 # Applies the styles to all columns
)
return styled_df
# Initialize the backlog_table_pane with the styled backlog table
backlog_table_pane = pn.pane.DataFrame(
create_styled_dataframe(backlog_table.reset_index(drop=True)), # Create a styled DataFrame
width=500,
)
# Trigger the first update of the pane
update_filtered_backlog_table(None)
# Attach the update function to widget changes to update the backlog table automatically
program_widget.param.watch(update_filtered_backlog_table, 'value')
priority_widget.param.watch(update_filtered_backlog_table, 'value')
indice_widget.param.watch(update_filtered_backlog_table, 'value')
#################################################################################
# New 09/26 - Text boxes related to production KPI - Production KPI tables
###################################################################################
# Styles for the header and the card
header_styles = {
"background_color": "#9EC5E4", # Header color
"font_color": "white" # Text color for header
}
# Custom CSS for the card and header
custom_css = f"""
<style>
.custom-card {{
border: 2px solid #A9A9A9; /* Gray border */
border-radius: 10px; /* Rounded corners */
background-color: #ECF4FA; /* Light background inside the card */
box-shadow: 2px 2px 10px rgba(0, 0, 0, 0.1); /* Subtle shadow */
padding: 15px; /* Padding inside the card */
}}
.custom-card-header {{
background-color: {header_styles["background_color"]}; /* Header background */
color: {header_styles["font_color"]}; /* Header text color */
padding: 10px;
font-size: 18px;
text-align: center;
font-weight: bold;
border-top-left-radius: 10px; /* Rounded top corners */
border-top-right-radius: 10px; /* Rounded top corners */
}}
</style>
"""
# Headers text for the card
header_html_Prod = f"<div class='custom-card-header'>Production KPI overview</div>"
header_html_Finance = f"<div class='custom-card-header'> Overview of Financial KPIs and the influence of Production KPIs</div>"
header_html_Backlog = f"<div class='custom-card-header'>Backlog KPI overview</div>"
#/////////////////////////////////////////////////////////////////////////////////
######################################################
# Create text box 'textbox_production_table_by_pty_indice_pane' under the table production_table_by_pty_indice_pane
######################################################
# 'Qty Shipped' - 'Total Top-Level Qty': Top-Levels filtered out of the calculation because their related WOs carry aberrant values.
# 'Total Top-Level Qty': Top-Levels that are therefore considered for the calculation.
# The calculation is based on the 'Total WO Count' data point, representing the number of WOs, of which 'Top-Level WO Count' are Top-Level WOs.
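# Illustrative example (hypothetical numbers): if the filtered Snapshot sums 120 units 'Shipped' while the filtered
# production pivot sums a 'Total Top-Level Qty' of 100, the text box reports 20 Top-Levels as filtered out.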
#/////////////////////////////////////////////////////////////////////////////////
# Initialize global variables
top_filtered_out_qty = 0
total_top_level_qty = 0
top_wo_count = 0
total_wo_count = 0
# Function to update the 'Top-Level Qty filtered-out' quantity
def update_top_filtered_out_qty(filtered_df_snapshot, filtered_df):
global top_filtered_out_qty
top_filtered_out_qty = filtered_df_snapshot['Shipped'].sum() - filtered_df['Total Top-Level Qty'].sum()
# Function to update the 'Total Top-Level Qty' based on filtered data
def update_total_top_level_qty(filtered_df):
global total_top_level_qty
total_top_level_qty = filtered_df['Total Top-Level Qty'].sum() if 'Total Top-Level Qty' in filtered_df else 0
# Function to update the 'Total WO Count' based on filtered data
def update_total_wo_count(filtered_df):
global total_wo_count
total_wo_count = filtered_df['Total WO Count'].sum() if 'Total WO Count' in filtered_df else 0
# Function to update the 'Top-Level WO Count' based on filtered data
def update_top_wo_count(filtered_df):
global top_wo_count
top_wo_count = filtered_df['Top-Level WO Count'].sum() if 'Top-Level WO Count' in filtered_df else 0
# Initialize the textbox to display results
textbox_production_table_by_pty_indice_pane = pn.pane.Markdown("", sizing_mode='stretch_width')
# Function to update quantities and text box based on widget selections
def update_textbox(event):
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
# Initialize a boolean mask with all True values for pivot_table_15_2
mask = pd.Series(True, index=pivot_table_15_2.index)
# Apply filters based on selections for pivot_table_15_2
if selected_program != 'All':
mask &= (pivot_table_15_2['Program'] == selected_program)
if selected_priority != 'All':
mask &= (pivot_table_15_2['Priority'] == selected_priority)
if selected_indice != 'All':
mask &= (pivot_table_15_2['Pty Indice'] == selected_indice)
# Apply the mask to filter pivot_table_15_2
filtered_df = pivot_table_15_2.loc[mask].copy()
# Initialize a boolean mask for df_Snapshot
mask_snapshot = pd.Series(True, index=df_Snapshot.index)
# Apply filters based on selections for df_Snapshot
if selected_program != 'All':
mask_snapshot &= (df_Snapshot['Program'] == selected_program)
if selected_priority != 'All':
mask_snapshot &= (df_Snapshot['Priority'] == selected_priority)
if selected_indice != 'All':
mask_snapshot &= (df_Snapshot['Pty Indice'] == selected_indice)
# Filter df_Snapshot using the constructed mask
filtered_df_snapshot = df_Snapshot[mask_snapshot].copy()
# Update quantities based on the filtered DataFrames
update_top_filtered_out_qty(filtered_df_snapshot, filtered_df)
update_total_top_level_qty(filtered_df)
update_top_wo_count(filtered_df)
update_total_wo_count(filtered_df)
# Update the text box content with the actual values
textbox_production_table_by_pty_indice_pane.object = f"""
▷ <b>{top_filtered_out_qty}</b> Top-Levels are filtered out of the calculation. <br>
▷ <b>{total_top_level_qty}</b> Top-Levels are considered for the calculation. <br>
▷ <b>{total_wo_count}</b> is the number of WOs included in the calculation, of which <b>{top_wo_count}</b> are Top-Level WOs.
"""
# Attach the update function to widget value changes
program_widget.param.watch(update_textbox, 'value')
priority_widget.param.watch(update_textbox, 'value')
indice_widget.param.watch(update_textbox, 'value')
# Initial text box setup
update_textbox(None) # Call once to initialize with default values
#/////////////////////////////////////////////////////////////////////////////////
##################################################################################
# Create a text box 'textbox_kpi_table_prod' under the table kpi_table_prod
##################################################################################
# Standard deviation represents 'Deviation vs Actual [%]' of the Standard Time.
# Standard Time has to be increased by 'Standard time to Actual time [%]' in order to reflect the Actual Time.
#/////////////////////////////////////////////////////////////////////////////////
# 'Deviation vs Actual [%]' and 'Standard time to Actual time [%]' come from df_Snapshot_prod_KPI
# Initialize global variables for the dynamic text
deviation_vs_actual = 0
standard_time_to_actual = 0
''' Update 10/07
# Function to convert percentage strings to numeric
def convert_percentage_to_numeric(percentage_str):
try:
return float(percentage_str.strip('%')) / 100 # Convert to decimal
except ValueError:
return None # Return None if conversion fails
'''
def convert_percentage_to_numeric(percentage):
"""Convert a percentage string to a numeric value (decimal)."""
if isinstance(percentage, str):
# Handle string input, stripping '%' and converting to decimal
return float(percentage.strip('%')) / 100
elif isinstance(percentage, (float, int)):
# Handle float or int input, already numeric
return percentage
else:
# Handle unexpected types, returning NaN or a default value
return float('nan') # Or return 0.0, depending on your requirement
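# Illustrative usage (hypothetical values):
#   convert_percentage_to_numeric('12.5%') -> 0.125
#   convert_percentage_to_numeric(0.125)   -> 0.125 (already numeric, returned unchanged)
#   convert_percentage_to_numeric(None)    -> nan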
# Function to update the dynamic values
def update_dynamic_values(filtered_df):
global deviation_vs_actual, standard_time_to_actual
# Convert columns to numeric values
if 'Deviation vs Actual [%]' in filtered_df:
filtered_df['Deviation vs Actual [%]'] = filtered_df['Deviation vs Actual [%]'].apply(convert_percentage_to_numeric)
deviation_vs_actual = filtered_df['Deviation vs Actual [%]'].mean() # Calculate mean
if 'Standard time to Actual time [%]' in filtered_df:
filtered_df['Standard time to Actual time [%]'] = filtered_df['Standard time to Actual time [%]'].apply(convert_percentage_to_numeric)
standard_time_to_actual = filtered_df['Standard time to Actual time [%]'].mean() # Calculate mean
# Create the dynamic text box for the KPI table
def update_textbox_kpi_table_prod(event):
selected_program = program_widget.value
selected_priority = priority_widget.value
selected_indice = indice_widget.value
# Create a mask for filtering df_Snapshot_prod_KPI
mask = pd.Series(True, index=df_Snapshot_prod_KPI.index)
if selected_program != 'All':
mask &= (df_Snapshot_prod_KPI['Program'] == selected_program)
if selected_priority != 'All':
mask &= (df_Snapshot_prod_KPI['Priority'] == selected_priority)
if selected_indice != 'All':
mask &= (df_Snapshot_prod_KPI['Pty Indice'] == selected_indice)
# Filter the DataFrame
filtered_df = df_Snapshot_prod_KPI.loc[mask].copy()
# Update dynamic values based on filtered DataFrame
update_dynamic_values(filtered_df)
# Update the text box content with the actual values
textbox_kpi_table_prod.object = f"""
▷ <b>Standard deviation</b> represents <b>{deviation_vs_actual:.0%}</b> of the Standard Time. <br>
▷ <b>Standard Time</b> has to be increased by <b>{standard_time_to_actual:.0%}</b> to reflect the <b>Actual Time</b>.
"""
# Initialize the textbox to display results
textbox_kpi_table_prod = pn.pane.Markdown("", width=425)
# Attach the update function to widget value changes
program_widget.param.watch(update_textbox_kpi_table_prod, 'value')
priority_widget.param.watch(update_textbox_kpi_table_prod, 'value')
indice_widget.param.watch(update_textbox_kpi_table_prod, 'value')
# Initial text box setup
update_textbox_kpi_table_prod(None) # Call once to initialize with default values
#/////////////////////////////////////////////////////////////////////////////////
###################################################
# Create a text box 'textbox_kpi_table_finance' under the table 'kpi_table_pane'
####################################################
textbox_kpi_table_finance = pn.pane.Markdown(
"""
▷ <b>IDD Current Margin [%]</b>: Margin based on the current Standard Time and Sale Price <br>
▷ <b>IDD AVG realized Margin [%]</b>: Average Margin over time considering any potential change of price <br>
▷ <b>IDD Corrected Margin [%]</b>: Real Margin reflecting the potential difference between Standard Time and Actual Time. Calculated based on the average 2024 labor cost ($79.58/h). <br>
➥ The Corrected Margin is not exact, as it includes only the labor efficiency but does not account for other factors such as labor cost variance and material purchasing variances. However, it is more representative of the real margin than the IDD Current Margin, which does not consider the labor efficiency (Actual Time vs. Standard Time).
""",
width=1270
)
#/////////////////////////////////////////////////////////////////////////////////
###################################################
# Create a text box 'textbox_kpi_table_backlog' under the table 'backlog_table_pane'
####################################################
textbox_kpi_table_backlog = pn.pane.Markdown(
"""
▷ <b>% Completion Critical Qty</b>: Based on the critical quantity. It can be > 100%. <br>
▷ <b>% Completion Total</b>: Including follow-up orders.
""",
width=600
)
########################################################################################
#### Create the layout for the production card, including the header and other elements
########################################################################################
def create_kpi_summary_card():
# This includes the custom CSS and the header HTML
card_layout = pn.Column(
pn.pane.HTML(custom_css + header_html_Prod), # Apply custom styles and header
pn.Spacer(height=10), # Spacer for layout
pn.Row(
pn.Spacer(width=20),
pn.Column(
production_table_by_pty_indice_pane, # First KPI Table
pn.Spacer(height=5), # Space between elements
textbox_production_table_by_pty_indice_pane, # First text box
pn.Spacer(height=5), # Space between elements
kpi_table_pane_prod, # Second KPI Table
pn.Spacer(height=5), # Space between elements
textbox_kpi_table_prod, # Second text box
width=425,
),
),
css_classes=["custom-card"] # Apply the custom card styling here
)
return card_layout
# Create and display the KPI summary card
kpi_summary_card = create_kpi_summary_card()
########################################################################################
#### Create the layout for the Financial card, including the header and other elements
########################################################################################
def create_kpi_summary_card_finance():
# This includes the custom CSS and the header HTML
card_layout = pn.Column(
pn.pane.HTML(custom_css + header_html_Finance), # Apply custom styles and header
pn.Spacer(height=10), # Spacer for layout
pn.Row(
pn.Spacer(width=20),
pn.Column(
kpi_table_pane, # Finance Table
pn.Spacer(height=5), # Space between elements
textbox_kpi_table_finance, # First text box
),
),
css_classes=["custom-card"] # Apply the custom card styling here
)
return card_layout
# Create and display the Finance KPI summary card
kpi_summary_card_finance = create_kpi_summary_card_finance()
########################################################################################
#### Create the layout for the backlog card, including the header and other elements
########################################################################################
def create_kpi_summary_card_backlog():
# This includes the custom CSS and the header HTML
card_layout = pn.Column(
pn.pane.HTML(custom_css + header_html_Backlog), # Apply custom styles and header
pn.Spacer(height=10), # Spacer for layout
pn.Row(
pn.Spacer(width=20),
pn.Spacer(height=5),
pn.Column(
backlog_table_pane, # Backlog Table
pn.Spacer(height=5), # Space between elements
textbox_kpi_table_backlog, # First text box
),
),
css_classes=["custom-card"] # Apply the custom card styling here
)
return card_layout
# Create and display the KPI summary card
kpi_summary_card_backlog = create_kpi_summary_card_backlog()
###################################################################################
#//////////////////////////////////////////////////
########################################
# Create Graph13-13b-14-14b Dashboard
########################################
#//////////////////////////////////////////////////
# Set explicit width and height for each plot
plot_pane_13bis.width = 370
plot_pane_13bis.height = 450
plot_pane_13bbis.width = 370
plot_pane_13bbis.height = 450
plot_pane_14bis.width = 370
plot_pane_14bis.height = 450
plot_pane_14bis_2.width = 370
plot_pane_14bis_2.height = 450
# Not displayed
plot_pane_14bbis.width = 370
plot_pane_14bbis.height = 450
plot_pane_15bis.width = 370
plot_pane_15bis.height = 450
# Create vertical divider (gray vertical line for separation)
vertical_divider_med1 = pn.pane.HTML(
'<div style="width: 3px; height: 450px; background-color:#D9D9D9;"></div>',
)
vertical_divider_long = pn.pane.HTML(
'<div style="width: 3px; height: 770px; background-color:#D9D9D9;"></div>',
)
# Updated 09/26 - to integrate kpi_summary_card_backlog
# Define the layout for the plots and tables
combined_plots_layout = pn.Column(
pn.Row( # Main row to contain left and right columns with a vertical divider
pn.Column( # Left column for backlog graphs and backlog table
pn.Row( # First row with the first set of plots
plot_pane_13bis, # First plot
pn.Spacer(width=10), # Spacer
plot_pane_13bbis, # Second plot
),
pn.Spacer(height=50), # 10/07 20 --> 50
kpi_summary_card_backlog, # KPI summary for backlog below the second plot
pn.Spacer(height=10), # Optional vertical spacer between rows
),
vertical_divider_long, # Vertical divider separating the left and right columns
pn.Spacer(width=20),
pn.Column( # Right column for additional plots and KPI summaries
pn.Row( # Row for the third plot and KPI summary
plot_pane_15bis, # Third plot
pn.Spacer(width=10), # Spacer
kpi_summary_card, # KPI summary card
pn.Spacer(width=30), # Spacer
vertical_divider_med1, # Vertical divider for layout
pn.Spacer(width=30), # Spacer
plot_pane_14bis_2, # Last plot in this row
),
pn.Spacer(height=40), # 10/07
pn.Row( # Row for finance KPI card
pn.Spacer(height=20), # Optional spacer for vertical spacing
kpi_summary_card_finance # KPI finance table card
),
),
)
)
# Create a container with a max width constraint using pn.layout
container = pn.Column(
combined_plots_layout,
sizing_mode='stretch_width',
)
# Create a dashboard combining all plots with the title
combined_dashboard = pn.Column(
container,
sizing_mode='stretch_both' # Ensure the column stretches to fill available vertical space
)
##############################################################################################
# Production dashboard layout (Production quadrant), populated from the default widget selections
#############################################################################################
production_dashboard = pn.Column(
pn.pane.HTML(f"""
<div style="text-align: left;">
<style>
h2 {{ margin-bottom: 0; color: #305496; }} /* Set title color here */
p {{ margin-top: 0; }}
</style>
<h2>Production</h2>
<p>{f"|CM-WIP| - <b>{file_date}</b>: Open WO at IDD based on QAD (ERP) | [Daily update]"}</p>
</div>
"""),
wip_selected_top,
filter_widgets_Prod,
wip_table,
height=600, # Set a fixed height so the Production dashboard stays within 600px
sizing_mode='stretch_width' # Adjust sizing mode
)
#New 09/20
################################################################################
# Display the selected 'Pty Indice' & 'Drawing#' on the dashboard
#################################################################################
# Display a big text representing the selection of the indice_widget as: "['Pty Indice'] 'IDD Top Level' ('SEDA Top Level' minus last 4 characters)"
def generate_display_text():
selected_indice = indice_widget.value # This could be 'All' or a specific selection
selected_priority = priority_widget.value # Get the selected priority
selected_program = program_widget.value # Get the selected program
# Filter the dataframe based on selected priority and program
filtered_rows = df_Summary[(df_Summary['Priority'] == selected_priority) & (df_Summary['Program'] == selected_program)]
# Only filter by 'Pty Indice' when a specific selection is made ('All' keeps every row)
if selected_indice != 'All':
# A list selection filters on multiple indices; a single value filters on one
if isinstance(selected_indice, list):
filtered_rows = filtered_rows[filtered_rows['Pty Indice'].isin(selected_indice)]
else:
filtered_rows = filtered_rows[filtered_rows['Pty Indice'] == selected_indice]
# Check if filtered_rows is empty
if filtered_rows.empty:
return "<span style='font-size: 18px; color: red;'>No matching data found</span>"
# Create a set to hold unique formatted strings
display_set = set()
# Iterate over all matching rows
for _, row in filtered_rows.iterrows():
idd_top_level = row['IDD Top Level'] # Adjust column name if needed
# Convert 'SEDA Top Level' to string, handle NaN/None values, then remove its last 4 characters # added 02/03
seda_top_level = str(row['SEDA Top Level']) if not pd.isnull(row['SEDA Top Level']) else ""
seda_trimmed = seda_top_level[:-4] if len(seda_top_level) > 4 else seda_top_level
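# Illustrative example (hypothetical PN): a 'SEDA Top Level' of 'D1234567-101' is displayed as drawing# 'D1234567'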
# Get the current 'Pty Indice'
pty_indice = row['Pty Indice']
# Create the formatted string for each row
display_text = f"<b>{pty_indice} {idd_top_level}</b> (drawing# {seda_trimmed})"
display_set.add(display_text) # Use a set to ensure uniqueness
# Sort the unique display texts
sorted_display_texts = sorted(display_set)
# Group display text into chunks of 3 for line breaks
grouped_text = []
for i in range(0, len(sorted_display_texts), 3):
# Join groups of 3 values and add them to grouped_text
grouped_text.append("; ".join(sorted_display_texts[i:i+3]))
# Join the grouped text with a line break <br>
final_display_text = "<br>".join(grouped_text)
# Wrap the final text in the desired styling
return f"<span style='font-size: 18px; color: #32599E;'>{final_display_text}</span>"
# Create a widget to display the formatted text using HTML pane
text_widget = pn.pane.HTML(generate_display_text())
# Callback to update the text when indice_widget, priority_widget, or program_widget changes
def update_text(event):
text_widget.object = generate_display_text()
# Attach the callback to the widget
indice_widget.param.watch(update_text, 'value')
priority_widget.param.watch(update_text, 'value')
program_widget.param.watch(update_text, 'value')
#//////////////////////////////////////////////////#//////////////////////////////////////////////////
###*********************#################********************##############*************************
####################################################################################################################
# Creating the complete dashboard
#####################################################################################################################
#//////////////////////////////////////////////////#//////////////////////////////////////////////////
#########################################################
# Watch changes on widget
#########################################################
program_widget.param.watch(update_priorities, 'value')
priority_widget.param.watch(update_indices, 'value')
#########################################################
# Callbacks
#########################################################
# Define callbacks for widget events - update_changes_table
program_widget.param.watch(update_changes_table, 'value')
priority_widget.param.watch(update_changes_table, 'value')
indice_widget.param.watch(update_changes_table, 'value')
# Define callbacks for widget events - update_sales_table
program_widget.param.watch(update_sales_table, 'value')
priority_widget.param.watch(update_sales_table, 'value')
indice_widget.param.watch(update_sales_table, 'value')
# Define callbacks for widget events - update_sales_summary
program_widget.param.watch(update_sales_summary, 'value')
priority_widget.param.watch(update_sales_summary, 'value')
indice_widget.param.watch(update_sales_summary, 'value')
# Define callbacks for widget events - update_supply_table
program_widget.param.watch(update_supply_table, 'value')
priority_widget.param.watch(update_supply_table, 'value')
indice_widget.param.watch(update_supply_table, 'value')
# Define callbacks for widget events - update_turnover_table
program_widget.param.watch(update_turnover_table, 'value')
priority_widget.param.watch(update_turnover_table, 'value')
indice_widget.param.watch(update_turnover_table, 'value')
# Define callbacks for widget events - update_turnover_summary
program_widget.param.watch(update_turnover_summary, 'value')
priority_widget.param.watch(update_turnover_summary, 'value')
indice_widget.param.watch(update_turnover_summary, 'value')
# Define callbacks for widget events - update_supply_selected_top
program_widget.param.watch(update_supply_selected_top, 'value')
priority_widget.param.watch(update_supply_selected_top, 'value')
indice_widget.param.watch(update_supply_selected_top, 'value')
# Define callbacks for widget events - update_wip_selected_top
program_widget.param.watch(update_wip_selected_top, 'value')
priority_widget.param.watch(update_wip_selected_top, 'value')
indice_widget.param.watch(update_wip_selected_top, 'value')
# Update 09/16
# Set up callbacks for all widgets
for widget in [program_widget, priority_widget, indice_widget] + list(filters_Prod.values()):
widget.param.watch(widget_change_prod, 'value')
# New 09/03
# Set up callbacks for supply chain table purchased archi
program_widget.param.watch(on_widget_change_supply, 'value')
priority_widget.param.watch(on_widget_change_supply, 'value')
indice_widget.param.watch(on_widget_change_supply, 'value')
# Update 09/16
# Set up callbacks for supply chain table full archi
for widget in [program_widget, priority_widget, indice_widget] + list(filters_fullArchi.values()):
widget.param.watch(lambda event: update_supply_table_fullArchi(event), 'value')
# Link widget and plot update for Graph 13bis
program_widget.param.watch(update_plot_13bis, 'value')
priority_widget.param.watch(update_plot_13bis, 'value')
indice_widget.param.watch(update_plot_13bis, 'value')
# Link widget and plot update for Graph 13bbis
program_widget.param.watch(update_plot_13bbis, 'value')
priority_widget.param.watch(update_plot_13bbis, 'value')
indice_widget.param.watch(update_plot_13bbis, 'value')
# Link widget and plot update for Graph 14bis
program_widget.param.watch(update_plot_14bis, 'value')
priority_widget.param.watch(update_plot_14bis, 'value')
indice_widget.param.watch(update_plot_14bis, 'value')
# Link widget and plot update for Graph 14bis_2
program_widget.param.watch(update_plot_14bis_2, 'value')
priority_widget.param.watch(update_plot_14bis_2, 'value')
indice_widget.param.watch(update_plot_14bis_2, 'value')
# Link widget and plot update for Graph 14bbis
program_widget.param.watch(update_plot_14bbis, 'value')
priority_widget.param.watch(update_plot_14bbis, 'value')
indice_widget.param.watch(update_plot_14bbis, 'value')
# Link widget and plot update for Graph 15bis
program_widget.param.watch(update_plot_15bis, 'value')
priority_widget.param.watch(update_plot_15bis, 'value')
indice_widget.param.watch(update_plot_15bis, 'value')
################################################################
# Define the cadrans_dashboard layout
################################################################
text_above_4cadrans_graphs = (
f"These graphs are based on data from |Snapshot| - <b>{file_date}</b>:<br>"
"▷ <b>Snapshot table</b>: Represents the remaining scope of the Transfer Project for the selected 'Program'.<br>"
"➥ It includes all PNs that have an existing IDD Backlog or for which the 'Critical Quantity,' defined as part of the transfer project, has not yet been reached. This applies even if the PN is not currently listed in the IDD Backlog.<br>"
"➥ Some PNs may not yet have an assigned IDD PN under 'IDD Top-Level'. In such cases, the BOM does not exist, and the given PN won't be present in this table; therefore, no data will be available for the graphs.<br>"
)
# Define your color
line_color = "#4472C4" # Change this to your desired color
font_top_color = "#4472C4"
#subtitle_backgroud_color = "#F2F2F2" #Gray
subtitle_background_color = "#aee0d9"
#------------------------------------------
# Convert the string to a datetime object using the format "%m-%d-%Y" to match the m-d-Y format
file_date_obj = datetime.strptime(file_date, "%m-%d-%Y")
# Format the datetime object into the desired m/d/Y format
formatted_date = file_date_obj.strftime("%m/%d/%Y")
# include within cadran title
cadrans_title = f"Status snapshot & 4 quadrant [{formatted_date}]"
#------------------------------------------
candrans_subtitle = "Selection of the Priority to be displayed on the dashboard"
candrans_subtitle2 = "Snapshot"
candrans_subtitle3 = " 4 quadrant - Engineering / Backlog / Supply Chain / Production"
# Create vertical and horizontal divs to act as colored lines
vertical_line = pn.pane.HTML(f"<div style='width: 6px; height: 800px; background-color: {line_color};'></div>")
horizontal_line = pn.pane.HTML(f"<div style='width: 2600px; height: 6px; background-color: {line_color};'></div>")
title_section = pn.pane.HTML(f"""
<div style='background-color: {font_top_color}; width: 100%; padding: 10px; box-sizing: border-box;'>
<h1 style='font-size: 24px; color: white; text-align: left; margin: 0;'>{cadrans_title}</h1>
</div>
""", sizing_mode='stretch_width')
# Title Layout
title_layout = pn.Column(
title_section,
pn.layout.Divider(margin=(-10, 0, 0, 0)), # Title divider
pn.Column(
#pn.pane.HTML(f"<h3 style='font-size: 12px; text-align: center; font-weight: normal;'>{candrans_subtitle}</h3>"),
pn.layout.Spacer(height=5), # Spacer to add space after subtitle
pn.Row(
program_widget,
priority_widget,
indice_widget,
pn.Column( # Group the spacer and text_widget inside a column for proper vertical spacing
pn.layout.Spacer(height=15), # Spacer to add space before text_widget
text_widget # Your formatted text widget
),
pn.layout.Spacer(height=10), # Spacer to add space after widget selection
sizing_mode='stretch_width' # Ensure the row stretches to fill the width
),
sizing_mode='stretch_width' # Ensure the column stretches to fill the width
),
sizing_mode='stretch_width' # Ensure the title layout stretches to fill the width
)
# Define Secondary Layout
secondary_layout = pn.Column(
pn.pane.HTML(f"""
<div style='background-color: {subtitle_background_color};
width: 100%;
padding: 10px;
box-sizing: border-box;
border-radius: 15px;'> <!-- Corrected closing div tag -->
<h1 style='font-size: 22px; color: white; text-align: left; margin: 0;'>
{candrans_subtitle2}
</h1>
</div>
""",sizing_mode='stretch_width'),
#pn.layout.Divider(margin=(-10, 0, 0, 0)), # Title divider
pn.Spacer(height=10), # Spacer before plots
text_above_4cadrans_graphs,
pn.Spacer(height=10), # Spacer before plots
combined_dashboard,
sizing_mode='stretch_width', # Ensure the secondary layout stretches to fill the width
height=1000 # Set a fixed height to prevent overlap # 10/07 920 --> 1000
)
# Define Primary Layout
primary_layout = pn.Column(
pn.Column(
pn.pane.HTML(f"""
<div style='background-color: {subtitle_background_color};
width: 100%;
padding: 10px;
box-sizing: border-box;
border-radius: 15px;'> <!-- Corrected closing div tag -->
<h1 style='font-size: 22px; color: white; text-align: left; margin: 0;'>
{candrans_subtitle3}
</h1>
</div>
""",sizing_mode='stretch_width'),
#pn.layout.Divider(margin=(-10, 0, 0, 0)), # Title divider
pn.Row(
pn.Column(
changes_dashboard,
sizing_mode='stretch_width' # Adjust sizing mode for Engineering quadrant
),
vertical_line, # Add vertical line between columns
pn.Column(
production_dashboard,
sizing_mode='stretch_width' # Adjust sizing mode for Production quadrant
),
sizing_mode='stretch_width', # Adjust sizing mode for the entire row
),
horizontal_line, # Add horizontal line between upper and lower quadrants
pn.Row(
pn.Column(
supply_dashboard,
sizing_mode='stretch_both' # Adjust sizing mode for Supply Chain quadrant
),
vertical_line, # Add vertical line between columns
pn.Column(
sales_dashboard,
sizing_mode='stretch_both' # Adjust sizing mode for Sales quadrant
),
),
sizing_mode='stretch_both', # Adjust sizing mode for the entire primary layout
)
)
# Combine Title, Primary, and Secondary Layouts
cadrans_dashboard = pn.Column(
title_layout,
pn.layout.Divider(margin=(0, 0, -10, 0)), # Add some space between primary and secondary layouts if needed
secondary_layout,
pn.layout.Divider(margin=(0, 0, -10, 0)), # Add some space between primary and secondary layouts if needed
primary_layout,
sizing_mode='stretch_both' # Ensure the final layout stretches to fill available space
)
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#*****************************************************************************************************************************
##############################################################################################################################
# |Project Overview| - historic_dashboard
##############################################################################################################################
#*****************************************************************************************************************************
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
###############################################################
# Historic DataFrame
###############################################################
# Ensure 'Invoice date' is in datetime format
df_Historic['Invoice date'] = pd.to_datetime(df_Historic['Invoice date'])
#Updated 09/06 to replace 'Standard amount USD' with 'Currency turnover ex.VAT' which represents the sales
# Rename columns
df_Historic = df_Historic.rename(columns={
'Quantity': 'Quantity shipped',
'Currency turnover ex.VAT': 'Sales',
'Complexity': 'Average Complexity',
})
# Filter to exclude rows where 'Order' contains 'NC'
df_Historic = df_Historic[~df_Historic['Order'].str.contains('NC', na=False)] # na=False treats missing 'Order' values as non-matching
##########################################
# Sorting df_Historic
###########################################
# Function to check if a value is numeric
def is_numeric(val):
try:
int(val)
return True
except ValueError:
return False
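# Illustrative usage (hypothetical values): is_numeric('12') -> True, is_numeric('TBD') -> False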
# Separate numeric and non-numeric 'Priority' values
df_numeric_priority = df_Historic[df_Historic['Priority'].apply(is_numeric)].copy() # .copy() avoids SettingWithCopyWarning when casting 'Priority' below
df_non_numeric_priority = df_Historic[~df_Historic['Priority'].apply(is_numeric)].copy()
# Convert 'Priority' values to integers for numeric priorities
df_numeric_priority['Priority'] = df_numeric_priority['Priority'].astype(int)
# Sort numeric priorities in ascending order
#df_numeric_priority = df_numeric_priority.sort_values(by='Priority', ascending=True) - update 08/28
df_numeric_priority.sort_values(by=['Priority', 'Pty Indice'], inplace=True)
# Combine the DataFrames, placing numeric priorities first and non-numeric priorities at the end
df_Historic_sorted = pd.concat([df_numeric_priority, df_non_numeric_priority])
# Reset index if needed
df_Historic_sorted.reset_index(drop=True, inplace=True)
# Update the original DataFrame
df_Historic = df_Historic_sorted
#print('df_Historic:')
#display(df_Historic)
#####################################
# Group by 'Month' and 'Program'
########################################
monthly_summary = df_Historic.groupby(['Month', 'Program']).agg({
'Quantity shipped': 'sum',
'Sales': 'sum',
'Pty Indice': lambda x: ', '.join(map(str, x)),
'IDD Top Level': lambda x: ', '.join(x),
'SEDA Top Level': lambda x: ', '.join(x),
'IDD Marge Standard': 'sum',
'Invoice date': 'first', # Keep the 'Invoice date' as the first date in each group
'Average Complexity': 'mean' # Calculate the average complexity
}).reset_index()
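# After this groupby, each row is one (Month, Program) pair holding the summed quantities, sales and margin,
# the concatenated PN lists, the first invoice date of the group, and the mean complexity.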
# Define a function to format numbers with 1 decimal digit if necessary
def format_complexity(value):
if pd.isna(value): # Handle NaN values
return value
elif value.is_integer():
return int(value) # Return as integer if value is an integer
else:
return round(value, 1) # Round to 1 decimal place otherwise
# Apply the formatting function to 'Complexity' column
monthly_summary['Average Complexity'] = monthly_summary['Average Complexity'].apply(format_complexity)
#Create 'Normalized Complexity'
monthly_summary['Normalized Complexity'] = monthly_summary['Average Complexity']*monthly_summary['Quantity shipped']
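# Illustrative example (hypothetical numbers): an 'Average Complexity' of 2.5 over 10 units shipped gives a 'Normalized Complexity' of 25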
###################################
# Fill NaN values appropriately
###################################
# Fill numeric columns with 0
numeric_cols = monthly_summary.select_dtypes(include='number').columns
monthly_summary[numeric_cols] = monthly_summary[numeric_cols].fillna(0)
# Fill string columns with ''
string_cols = monthly_summary.select_dtypes(include='object').columns
monthly_summary[string_cols] = monthly_summary[string_cols].fillna('')
# Ensure 'Invoice date' is in datetime format
df_Historic['Invoice date'] = pd.to_datetime(df_Historic['Invoice date'])
# Sort by 'Invoice date' in descending order
monthly_summary = monthly_summary.sort_values(by='Invoice date', ascending=True)
# Display the updated DataFrame
#print('monthly_summary:')
#display(monthly_summary)
#################################################################################################################
# Widgets initialization
################################################################################################################
# Example usage with defaults
default_program_historic = 'Phase 4-5'
# Initialize program widget, excluding NaN values
unique_programs_historic = df_Priority['Program'].dropna().unique().tolist()
program_widget_historic = pn.widgets.Select(name='Select Program', options=unique_programs_historic, value=default_program_historic)
program = program_widget_historic.value
#//////////////////////////////////////////////////
########################################
# Create Graphs Monthly_history
########################################
#//////////////////////////////////////////////////
# To review 09/06
# Custom color palette with alpha transparency
custom_palette = {
'Quantity shipped': '#5AB2CA',
'Sales': '#63BE7B',
'IDD Marge Standard': '#E2EFDA',
'Normalized Complexity': 'rgba(255, 47, 47, 0.7)' # Alpha applied
}
def customize_qty_shipped_plot(bokeh_plot):
""" Apply customizations to the Quantity Shipped plot. """
bokeh_plot.xaxis.major_label_text_font_size = '8pt'
bokeh_plot.yaxis.major_label_text_font_size = '10pt'
bokeh_plot.title.text_font_size = '12pt'
bokeh_plot.title.text_color = "#305496"
bokeh_plot.xaxis.axis_line_width = 2
bokeh_plot.yaxis.axis_line_width = 2
bokeh_plot.xaxis.major_label_orientation = 'vertical' # 10/23 change to vertical
bokeh_plot.yaxis.major_label_orientation = 'horizontal'
bokeh_plot.yaxis.axis_label_text_font_size = '10pt'
bokeh_plot.ygrid.grid_line_color = '#E0E0E0'
bokeh_plot.ygrid.grid_line_dash = [4, 6]
bokeh_plot.toolbar.logo = None
return bokeh_plot
def customize_combined_plot(bokeh_plot):
""" Apply customizations to the Combined plot. """
bokeh_plot.xaxis.major_label_text_font_size = '8pt'
bokeh_plot.yaxis.major_label_text_font_size = '10pt'
bokeh_plot.title.text_font_size = '12pt'
bokeh_plot.title.text_color = "#305496"
bokeh_plot.xaxis.axis_line_width = 2
bokeh_plot.yaxis.axis_line_width = 2
bokeh_plot.xaxis.major_label_orientation = 'vertical' # 10/23 change to vertical
bokeh_plot.yaxis.major_label_orientation = 'horizontal'
bokeh_plot.yaxis.axis_label_text_font_size = '10pt'
bokeh_plot.ygrid.grid_line_color = '#F0F0F0'
bokeh_plot.ygrid.grid_line_dash = [4, 6]
bokeh_plot.toolbar.logo = None
# Format the y-axis ticks in thousands with a dollar sign
bokeh_plot.yaxis.formatter = CustomJSTickFormatter(code="""
return '$' + (tick / 1000).toFixed(0) + 'k';
""")
return bokeh_plot
def customize_total_quantity_plot(bokeh_plot):
""" Apply customizations to the Total Quantity plot. """
bokeh_plot.xaxis.major_label_text_font_size = '8pt'
bokeh_plot.yaxis.major_label_text_font_size = '10pt'
bokeh_plot.title.text_font_size = '12pt'
bokeh_plot.title.text_color = "#305496"
bokeh_plot.xaxis.axis_line_width = 2
bokeh_plot.yaxis.axis_line_width = 2
bokeh_plot.xaxis.major_label_orientation = 'vertical'
bokeh_plot.yaxis.major_label_orientation = 'horizontal'
bokeh_plot.yaxis.axis_label_text_font_size = '10pt'
bokeh_plot.ygrid.grid_line_color = '#E0E0E0'
bokeh_plot.ygrid.grid_line_dash = [4, 6]
bokeh_plot.toolbar.logo = None
return bokeh_plot
def create_total_quantity_plot(df_Historic, default_program_historic):
# Filter data by the default program
filtered_data = df_Historic[df_Historic['Program'] == default_program_historic]
if filtered_data.empty:
print("No data found for the default program.")
return None
# Aggregate data: Sum 'Quantity shipped' for each 'Pty Indice'
aggregated_data = filtered_data.groupby('Pty Indice')['Quantity shipped'].sum().reset_index()
# If the program is 'Phase 4-5', sort by 'Priority'
if default_program_historic == 'Phase 4-5':
# Merge the aggregated data with original to retain 'Priority'
aggregated_data = pd.merge(aggregated_data, filtered_data[['Pty Indice', 'Priority']].drop_duplicates(), on='Pty Indice')
# Convert 'Priority' values to integers for sorting
aggregated_data['Priority'] = aggregated_data['Priority'].astype(int)
# Sort numeric priorities in ascending order
#aggregated_data = aggregated_data.sort_values(by='Priority', ascending=True) - update 08/28
aggregated_data.sort_values(by=['Priority', 'Pty Indice'], inplace=True)
# Define the uniform color - 09/12
uniform_color = '#5AB2CA' # Light blue color
# Create the plot Total Quantity Shipped Monthly
total_quantity_plot = aggregated_data.hvplot.bar(
x='Pty Indice',
y='Quantity shipped',
title="Total Quantity Shipped Monthly",
xlabel='Pty Indice',
ylabel='Total Quantity Shipped',
#cmap=custom_palette,
color=uniform_color, # Apply the same color to all bars
legend='top_left',
height=400,
tools=[]
)
return total_quantity_plot
def create_plots(monthly_summary, default_program_historic, df_Historic):
# Filter data by default program
filtered_data = monthly_summary[monthly_summary['Program'] == default_program_historic]
if filtered_data.empty:
print("No data found for the default program.")
return None, None, None
# Melt the DataFrame to include Normalized Complexity
melted_df = filtered_data.melt(id_vars=['Month'], value_vars=['Quantity shipped', 'Sales', 'IDD Marge Standard', 'Normalized Complexity'],
var_name='Quantity Type', value_name='Quantity Value')
# Create plot for 'Quantity shipped' and 'Normalized Complexity'
qty_shipped_plot = melted_df[melted_df['Quantity Type'].isin(['Quantity shipped', 'Normalized Complexity'])].hvplot.bar(
x='Month',
y='Quantity Value',
color='Quantity Type',
title="Monthly shipment - Quantity Shipped & Normalized Complexity",
xlabel='Month',
ylabel='Top-Level shipped [Quantity]',
cmap=custom_palette,
legend='top_left',
height=400,
bar_width=0.6, # Set bar width - 09/12
tools=[]
)
bokeh_qty_shipped_plot = hv.render(qty_shipped_plot, backend='bokeh')
bokeh_qty_shipped_plot = customize_qty_shipped_plot(bokeh_qty_shipped_plot)
#New 08/09
#####################################################
# Remove existing HoverTools (if any) before adding a new one
bokeh_qty_shipped_plot.tools = [tool for tool in bokeh_qty_shipped_plot.tools if not isinstance(tool, HoverTool)]
# Add HoverTool with custom formatting
hover = HoverTool()
hover.tooltips = [
("Month", "@Month"),
("KPI", "@color"),
("Value", "@Quantity_Value")
]
# Add HoverTool to the plot
bokeh_qty_shipped_plot.add_tools(hover)
# Remove wheel zoom from active tools if you want it inactive by default - 08/12
bokeh_qty_shipped_plot.tools = [tool for tool in bokeh_qty_shipped_plot.tools if not isinstance(tool, WheelZoomTool)]
############################################################
# Create combined plot for 'IDD Marge Standard' and 'Sales'
combined_plot = melted_df[melted_df['Quantity Type'].isin(['IDD Marge Standard', 'Sales'])].hvplot.bar(
x='Month',
y='Quantity Value',
color='Quantity Type',
title="Monthly shipment - IDD Margin & Total Sales",
xlabel='Month',
ylabel='[K$]',
cmap=custom_palette,
legend='top_left',
stacked=True, # Stacking bars
height=400,
bar_width=0.6, # Set bar width - 09/12
tools=[]
)
bokeh_combined_plot = hv.render(combined_plot, backend='bokeh')
bokeh_combined_plot = customize_combined_plot(bokeh_combined_plot)
#New 08/08
#####################################################
# Remove existing HoverTools (if any) before adding a new one
bokeh_combined_plot.tools = [tool for tool in bokeh_combined_plot.tools if not isinstance(tool, HoverTool)]
# Add HoverTool with custom formatting
hover = HoverTool()
hover.tooltips = [
("Month", "@Month"),
("KPI", "@color"),
("Value", "@Quantity_Value{($0,0k)}") # Format values: thousands with 'K' # Quantity_Value with the '_' otherwise that does not work!
]
# Add HoverTool to the plot
bokeh_combined_plot.add_tools(hover)
# Remove wheel zoom from active tools if you want it inactive by default - 08/12
bokeh_combined_plot.tools = [tool for tool in bokeh_combined_plot.tools if not isinstance(tool, WheelZoomTool)]
############################################################
# Create Total Quantity Shipped plot
total_quantity_plot = create_total_quantity_plot(df_Historic, default_program_historic)
if total_quantity_plot:
bokeh_total_quantity_plot = hv.render(total_quantity_plot, backend='bokeh')
bokeh_total_quantity_plot = customize_total_quantity_plot(bokeh_total_quantity_plot)
else:
bokeh_total_quantity_plot = None
# Remove wheel zoom from active tools if you want it inactive by default - 08/12
# Guard against the case where no plot was created for the selected program
if bokeh_total_quantity_plot is not None:
bokeh_total_quantity_plot.tools = [tool for tool in bokeh_total_quantity_plot.tools if not isinstance(tool, WheelZoomTool)]
return bokeh_qty_shipped_plot, bokeh_combined_plot, bokeh_total_quantity_plot
def update_plots(event):
# Get the selected program from the widget
program = program_widget_historic.value
#print(f"Updating plots for program: {program}")
# Filter data by the selected program
filtered_data = monthly_summary[monthly_summary['Program'] == program]
if filtered_data.empty:
print("No data found for the selected program.")
return
# Melt the DataFrame
melted_df = filtered_data.melt(id_vars=['Month'], value_vars=['Quantity shipped', 'Sales', 'IDD Marge Standard', 'Normalized Complexity'],
var_name='Quantity Type', value_name='Quantity Value')
# Update plots
bokeh_qty_shipped_plot, bokeh_combined_plot, bokeh_total_quantity_plot = create_plots(filtered_data, program, df_Historic)
# Update the plots in the Panel layout
plot_pane1.object = bokeh_qty_shipped_plot
plot_pane2.object = bokeh_combined_plot
plot_pane3.object = bokeh_total_quantity_plot
# Initial setup of the plots
bokeh_qty_shipped_plot, bokeh_combined_plot, bokeh_total_quantity_plot = create_plots(monthly_summary, default_program_historic, df_Historic)
# Convert Bokeh plots to Panel
plot_pane1 = pn.pane.Bokeh(bokeh_qty_shipped_plot, sizing_mode='stretch_width')
plot_pane2 = pn.pane.Bokeh(bokeh_combined_plot, sizing_mode='stretch_width')
plot_pane3 = pn.pane.Bokeh(bokeh_total_quantity_plot, sizing_mode='stretch_width')
# Update plot initially - Needed for the sizing_mode='stretch_width' to be set
update_plots(None)
#New 08/17
############################################################################################
# Display the DataFrame monthly_summary with the list of Pty Indice for each Month under Graph 3
#############################################################################################
# Function to remove duplicates in comma-separated strings
def remove_duplicates_from_string(s):
items = s.split(', ')
unique_items = sorted(set(items), key=items.index) # Preserve order
return ', '.join(unique_items)
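# Illustrative usage (hypothetical values): remove_duplicates_from_string('P1, P2, P1') -> 'P1, P2' (first-occurrence order preserved)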
# Filter DataFrame by program
# Function to filter and sort DataFrame by program and month
def filter_dataframe_monthly_summary(program):
# Apply the filter based on selected program
filtered_df = monthly_summary[monthly_summary['Program'] == program]
# Check if the filtered DataFrame is empty
if filtered_df.empty:
print("No data found for the specified program.")
return filtered_df # Return empty DataFrame if no matches found
# Filter columns
filtered_df = filtered_df[['Month', 'Pty Indice', 'Quantity shipped', 'IDD Top Level']]
# Remove duplicates in specified columns
filtered_df['Pty Indice'] = filtered_df['Pty Indice'].apply(remove_duplicates_from_string)
filtered_df['IDD Top Level'] = filtered_df['IDD Top Level'].apply(remove_duplicates_from_string)
# Create a temporary column for sorting by converting 'Month' to datetime
filtered_df['Month_dt'] = pd.to_datetime(filtered_df['Month'], format='%b %y', errors='coerce')
# Check for any invalid dates after conversion
if filtered_df['Month_dt'].isnull().any():
print("Some dates could not be parsed. Please check the 'Month' column for incorrect formats.")
return filtered_df # Return DataFrame without sorting
# Sort by the new 'Month_dt' column
filtered_df = filtered_df.sort_values(by='Month_dt', ascending=False) # Set ascending to False to display most recent month first
# Reset the index after sorting
filtered_df.reset_index(drop=True, inplace=True)
# Print the DataFrame before deleting the temporary column
#print("Filtered and sorted DataFrame before dropping the temporary column:")
#display(filtered_df)
# Drop the 'Month_dt' column
filtered_df = filtered_df.drop(columns=['Month_dt'])
return filtered_df
#####################################################
# Table with alternating blue and white rows
#######################################################
# Function to apply custom styles to the DataFrame (alternating row colors)
def style_dataframe_bleu(df):
def row_styles(row):
# Alternate row colors based on row index
color = '#ADDAE5' if row.name % 2 == 0 else '#ffffff'
return [f'background-color: {color}'] * len(row)
# Apply the style function to the DataFrame rows
styled_df = df.style.apply(row_styles, axis=1)
# Hide the index
styled_df.hide(axis="index")
return styled_df
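# Note: Styler.apply with axis=1 passes each row to row_styles, so the returned list of CSS strings colors every
# cell in that row; hide(axis="index") then drops the index column from the rendered HTML.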
# Function to update DataFrame display with custom styling
def update_dataframe_monthly_summary(program):
filtered_df = filter_dataframe_monthly_summary(program)
styled_df = style_dataframe_bleu(filtered_df)
styled_html = styled_df.to_html() # New 10/24
# Add CSS for overflow handling directly in the HTML
html_with_overflow = f'<div style="overflow-y: auto; height: 450px;">{styled_html}</div>'
return html_with_overflow
# Callback function to update the table based on widget value
def update_table(event):
# Get the new filtered DataFrame based on the selected program in the widget
new_df = filter_dataframe_monthly_summary(event.new)
# Style the new DataFrame
styled_df = style_dataframe_bleu(new_df)
# Convert styled DataFrame to HTML for rendering
styled_html = styled_df.to_html() # Use 'to_html' instead of 'render' # New 10/24
# Update the DataFrame pane object directly with the styled DataFrame (without recreating the pane)
html_with_overflow = f'<div style="overflow-y: auto; height: 450px;">{styled_html}</div>'
monthly_summary_table.object = html_with_overflow
# Attach callback to the widget
program_widget_historic.param.watch(update_table, 'value')
# Create initial DataFrame table
monthly_summary_table = pn.pane.HTML(update_dataframe_monthly_summary(default_program_historic), width=500)
######################################
# Create text below the graphs
########################################
# Convert 'Invoice date' to datetime format
df_Historic['Invoice date'] = pd.to_datetime(df_Historic['Invoice date'])
# Calculate the span period of the Turnover Report span_TurnoverReport
start_date_historic = df_Historic['Invoice date'].min()
end_date_historic = df_Historic['Invoice date'].max()
span_df_Historic = f"{start_date_historic.date()} to {end_date_historic.date()}" # Format dates as needed
text_below_graph_qty_shipped_plot = (
f"This graph is based on data from |CM-Historic| - Span: <b>{span_df_Historic}</b>:<br>"
f"▷ <b>Quantity shipped</b>: Total quantity of Top-Level related to the selected program shipped since {start_date_historic.date()}<br>"
"▷ <b>Normalized Complexity</b>: Average complexity of the Top-Level shipped normalized on the quantity of each PN shipped on the period.<br>"
"▷ <b>The complexity is define as</b>: Kit, Subs = 0, Lighplate = 1, Rotottelite = 2, CPA = 3, ISP = 4.<br>"
)
text_below_graph_Marge_Sales = (
f"This graph is based on data from |CM-Historic| - Span: <b>{span_df_Historic}</b>:<br>"
"▷ <b>Sales</b>: Sum of the 'Currency turnover ex.VAT' for the PN shipped during the specified month<br>"
"▷ <b>IDD Marge Standard</b>: Sum of the 'IDD Margin Standard' for the PN shipped during the specified month.<br>"
)
text_below_graph_shipped_pty_indice = (
f"This graph is based on data from |CM-Historic| - Span: <b>{span_df_Historic}</b>:<br>"
f"▷ <b>Total Quantity shipped</b>: Total quantity of Top-Level related to the selected pty Indice shipped since {start_date_historic.date()}.<br>"
)
###WORKING CODE
##############################################
# Combine plots into a vertical Panel layout
###############################################
#create short vertical divider
vertical_divider_medium = pn.pane.HTML(
'<div style="width: 1px; height: 500px; background-color:#D9D9D9;"></div>',
)
vertical_divider_medium2 = pn.pane.HTML(
'<div style="width: 1px; height: 500px; background-color:#D9D9D9;"></div>',
)
# Combine the plots and table in the layout
combined_plots_history = pn.Column(
pn.Row(
pn.Column(
plot_pane1,
text_below_graph_qty_shipped_plot
),
pn.Spacer(width=50),
vertical_divider_medium,
pn.Spacer(width=50),
pn.Column(
plot_pane2,
text_below_graph_Marge_Sales
),
sizing_mode='stretch_width'
),
pn.Spacer(height=50), # Spacer before the next row
pn.Row(
pn.Column(
plot_pane3,
text_below_graph_shipped_pty_indice
),
monthly_summary_table, # Add table pane to the right of the text and plot
)
)
########################################
# Call-out on program_widget_historic
########################################
program_widget_historic.param.watch(update_plots, 'value')
#//////////////////////////////////////////////////
###################################################
# Create Graphs Categories of products - 08/09
###################################################
# Reload df_Priority because it was filtered earlier in the code
df_Priority = pd.read_excel(input_file_formatted, sheet_name='CM-Priority', index_col=False)
#----------------------------------------------------------
# 02/11 - Change 'Phase 4' or 'Phase 5' with 'Phase 4-5'
#----------------------------------------------------------
# For df_Priority
if 'Program' in df_Priority.columns and 'Pty Indice' in df_Priority.columns:
mask = (
df_Priority['Program'].isin(['Phase 4', 'Phase 5']) &
~df_Priority['Pty Indice'].str.contains('Phase5', na=False)
)
df_Priority.loc[mask, 'Program'] = 'Phase 4-5'
#----------------------------------------------------------
# Correctly accessing multiple columns
Pivot_table_distribution = df_Snapshot[['Pty Indice', 'Top-Level Status', 'Priority', 'Shipped', 'Remain. crit. Qty', 'Production Status', 'IDD Backlog Qty', 'Product Category', 'Critical Qty']].copy() # .copy() avoids SettingWithCopyWarning on the assignments below
# Apply mapping to create 'Program' column in Pivot_table_distribution
Pivot_table_distribution['Program'] = Pivot_table_distribution['Pty Indice'].map(indice_to_program)
#Include missing Pty from df_Priority and fill 'IDD Backlog Qty' with 0
# Perform a left join, so all rows from Pivot_table_distribution are kept
Pivot_table_distribution = pd.merge(
Pivot_table_distribution,
df_Priority[['Pty Indice']].drop_duplicates(), # Selecting only the 'Pty Indice' column from df_Priority and dropping duplicates
on='Pty Indice',
how='left' # 'left' join to keep all rows from Pivot_table_distribution
)
#Fill 'IDD Backlog Qty' with 0 where missing
Pivot_table_distribution['IDD Backlog Qty'].fillna(0, inplace=True)
# Calculate necessary fields - Update 08/14: 'Total Quantity' should be set as 'IDD Backlog Qty' + 'Shipped' not just 'IDD Backlog Qty'
#Pivot_table_distribution['Total Quantity'] = Pivot_table_distribution[['Critical Qty', 'IDD Backlog Qty']].max(axis=1)
# Ensure that there are no NaN values in 'Shipped' and 'Remain. crit. Qty'
Pivot_table_distribution['Shipped'].fillna(0, inplace=True)
Pivot_table_distribution['Remain. crit. Qty'].fillna(0, inplace=True)
# Compute the sum of 'IDD Backlog Qty' and 'Shipped'
Pivot_table_distribution['Sum IDD Backlog and Shipped'] = Pivot_table_distribution['IDD Backlog Qty'] + Pivot_table_distribution['Shipped']
# Apply conditional logic: 'Total Quantity' = 'IDD Backlog Qty' + 'Shipped' ONLY if 'Remain. crit. Qty' = 0
Pivot_table_distribution['Total Quantity'] = np.where(
Pivot_table_distribution['Remain. crit. Qty'] == 0,
Pivot_table_distribution['Sum IDD Backlog and Shipped'],
Pivot_table_distribution[['Critical Qty', 'IDD Backlog Qty']].max(axis=1)
)
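# Illustrative example (hypothetical numbers): a PN with 'Remain. crit. Qty' = 0, 'IDD Backlog Qty' = 5 and 'Shipped' = 45
# gets 'Total Quantity' = 50; a PN still short of its critical quantity keeps max('Critical Qty', 'IDD Backlog Qty').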
# Optionally, drop the intermediate column if no longer needed
#Pivot_table_distribution.drop(columns=['Sum IDD Backlog and Shipped'], inplace=True)
#print("Pivot_table_distribution:")
#display(Pivot_table_distribution)
#Saved 08/13 to include redlist in these tables
'''
###################################
# Aggregate data by Program and Product Category
aggregation_by_product_category = Pivot_table_distribution.groupby(['Program', 'Product Category']) \
.agg(Pty_Indice_Count_Product_Category=('Pty Indice', 'nunique')) \
.reset_index()
# Aggregate data by Program and Production Status
aggregation_by_production_status = Pivot_table_distribution.groupby(['Program', 'Production Status']) \
.agg(Pty_Indice_Count_Production_Status=('Pty Indice', 'nunique')) \
.reset_index()
#########################
# Print aggregated data
#########################
#print("Aggregated Data by Product Category:")
#display(aggregation_by_product_category)
#print("Aggregated Data by Production Status:")
#display(aggregation_by_production_status)
'''
#New 08/12
######################################################################################
# Define the % Completion of each Pty Indice in a new dataframe Pivot_table_completion
##########################################################################################
# Copy Pivot_table_distribution
Pivot_table_completion = Pivot_table_distribution.copy()
#print('Pivot_table_completion before adding new rows')
#display(Pivot_table_completion)
######################################################################################################################
# df_Priority is missing ['Top-Level Status', 'IDD Backlog Qty', 'Product Category', 'Total Quantity'] --> delete 'Top-Level Status', 'Product Category' & 'IDD Backlog Qty' from the DataFrame
# If a PN is not in df_Snapshot, and therefore not in Pivot_table_completion, it should mean that either:
# --> The PN does not have a BOM: either it is not a Top-Level or the prep work is not completed.
# --> The PN is part of the redlist & 'Top-Level Status' = short or 'IDD Backlog Qty' = 0 (or both): filtered out from df_Snapshot.
# --> In that case, 'Total Quantity' should be set to 'Critical Qty'.
####################################################################################################################
# Columns to remove from Pivot_table_completion
columns_to_remove = ['Top-Level Status', 'Production Status', 'IDD Backlog Qty', 'Product Category']
Pivot_table_completion = Pivot_table_completion.drop(columns=columns_to_remove, errors='ignore')
# Create mappings from df_Priority
priority_mapping = df_Priority.set_index('Pty Indice')['Priority']
program_mapping = df_Priority.set_index('Pty Indice')['Program']
shipped_mapping = df_Priority.set_index('Pty Indice')['Shipped']
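# Illustrative sketch (hypothetical values): Series.map looks up each 'Pty Indice' in the
# mapping's index and returns NaN for indices that are missing, hence the fillna calls below.
_demo_map = pd.Series(['P1', 'P9']).map(pd.Series({'P1': 1})) # -> [1.0, NaN]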
# Map values to Pivot_table_completion
Pivot_table_completion['Priority'] = Pivot_table_completion['Pty Indice'].map(priority_mapping)
Pivot_table_completion['Program'] = Pivot_table_completion['Pty Indice'].map(program_mapping)
Pivot_table_completion['Qty Shipped'] = Pivot_table_completion['Pty Indice'].map(shipped_mapping)
# Fill missing values for 'Priority' and 'Program'
Pivot_table_completion['Priority'].fillna('Unknown', inplace=True)
Pivot_table_completion['Program'].fillna('Unknown', inplace=True)
# Optionally, filter out rows where 'Qty Shipped' is NaN or 0
Pivot_table_completion = Pivot_table_completion[Pivot_table_completion['Qty Shipped'].notna() & (Pivot_table_completion['Qty Shipped'] > 0)]
# Define criteria for including additional rows from df_Priority
# For example, you might want to include rows where 'Qty Shipped' > a certain threshold
additional_rows_criteria = df_Priority['Shipped'] > 0 # Example condition
additional_rows = df_Priority[additional_rows_criteria]
# Add only the rows that are not already in Pivot_table_completion based on 'Pty Indice'
additional_rows = additional_rows[~additional_rows['Pty Indice'].isin(Pivot_table_completion['Pty Indice'])]
# Select and rename columns to match Pivot_table_completion
additional_rows = additional_rows[['Pty Indice', 'Shipped', 'Priority', 'Program', 'Critical Qty']]
additional_rows.rename(columns={'Shipped': 'Qty Shipped'}, inplace=True)
# Append additional rows to Pivot_table_completion
Pivot_table_completion = pd.concat([Pivot_table_completion, additional_rows], ignore_index=True)
# Optionally, remove duplicates if necessary
Pivot_table_completion = Pivot_table_completion.drop_duplicates(subset='Pty Indice', keep='last')
# Fill 'Total Quantity' with 'Critical Qty' where 'Total Quantity' is NaN -- Update 08/14: When 'IDD Backlog' > 0, the 'Total Quantity' = 'IDD Backlog' + 'Qty Shipped'
Pivot_table_completion['Total Quantity'] = Pivot_table_completion['Total Quantity'].fillna(Pivot_table_completion['Critical Qty'])
# Sort by 'Priority' and place string values at the end of the DataFrame
# Convert 'Priority' column to numeric, coercing errors (non-numeric entries become NaN)
Pivot_table_completion['Priority'] = pd.to_numeric(Pivot_table_completion['Priority'], errors='coerce')
# Fill NaNs with a default value (e.g., 999) for sorting
Pivot_table_completion['Priority'].fillna(999, inplace=True)
# Sort the DataFrame by 'Priority' column in ascending order
#Pivot_table_completion.sort_values(by='Priority', ascending=True, inplace=True) - Update 08/28
Pivot_table_completion.sort_values(by=['Priority', 'Pty Indice'], inplace=True)
# Reset index if needed
Pivot_table_completion.reset_index(drop=True, inplace=True)
#print('Pivot_table_completion with missing rows, no duplicates, and updated columns:')
#display(Pivot_table_completion)
########################################################################################
# Define % Completion Critical Qty = Qty Shipped / Critical Qty
Pivot_table_completion['% Completion Critical Qty'] = (Pivot_table_completion['Qty Shipped'] / Pivot_table_completion['Critical Qty']) * 100
# Cap the values at 100%
Pivot_table_completion['% Completion Critical Qty'] = Pivot_table_completion['% Completion Critical Qty'].clip(upper=100)
# Replace NaN values with 0
Pivot_table_completion['% Completion Critical Qty'] = Pivot_table_completion['% Completion Critical Qty'].fillna(0)
# Round to the nearest whole number
Pivot_table_completion['% Completion Critical Qty'] = Pivot_table_completion['% Completion Critical Qty'].round(0).astype(int)
# Define % Completion Total Qty = Qty Shipped / Total Quantity
Pivot_table_completion['% Completion Total Qty'] = (Pivot_table_completion['Qty Shipped'] / Pivot_table_completion['Total Quantity']) * 100
# Replace NaN values with 0
Pivot_table_completion['% Completion Total Qty'] = Pivot_table_completion['% Completion Total Qty'].fillna(0)
# Round to the nearest whole number
#Pivot_table_completion['% Completion Total Qty'] = Pivot_table_completion['% Completion Total Qty'].round(0).astype(int) # saved 02/03
##### 02/03 ####################################
# First handle infinite values and NaNs
Pivot_table_completion['% Completion Total Qty'] = (
Pivot_table_completion['% Completion Total Qty']
.replace([np.inf, -np.inf], np.nan) # Replace infinities with NaN
.fillna(0) # Now fill all NaNs with 0
)
# Then perform rounding and conversion
Pivot_table_completion['% Completion Total Qty'] = (
Pivot_table_completion['% Completion Total Qty']
.round(0)
.astype(int)
)
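# Illustrative sketch (hypothetical values) of why the inf/NaN handling above is needed:
# a zero 'Total Quantity' turns the ratio into inf, which cannot be cast to int directly.
_demo_pct = pd.Series([50.0, 10.0]) / pd.Series([100.0, 0.0]) * 100 # -> [50.0, inf]
_demo_pct = _demo_pct.replace([np.inf, -np.inf], np.nan).fillna(0).round(0).astype(int) # -> [50, 0]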
####################################################
###############################
# Print Pivot_table_completion
################################
# Set options to display the entire DataFrame
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
#print('Pivot_table_completion with ALL column')
#display(Pivot_table_completion)
#############################################################################################################
# New 08/13 - Include products from the redlist ('To be transferred', 'Canceled' or 'Officially transferred') within Pivot_table_completion
##############################################################################################################
Pivot_table_scope = df_Priority.copy()
#print('df_Priority without any filter')
#display(Pivot_table_scope)
# Function defining the 'Product Category' based on 'Description'
def determine_category(description):
if not isinstance(description, str):
return 'Others'
if description == 'Rototellite':
return 'Rototellite'
elif 'Indicator' in description or 'CPA' in description:
return 'CPA'
elif 'Lightplate' in description:
return 'Lightplate'
elif 'ISP' in description or 'Keyboard' in description:
return 'ISP'
elif 'Module' in description:
return 'CPA'
elif 'optics' in description:
return 'Fiber Optics'
else:
return 'Others'
# Create 'Product Category' column based on the 'Description'
Pivot_table_scope['Product Category'] = Pivot_table_scope['Description'].apply(determine_category)
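# Quick sanity check of the mapping above (hypothetical descriptions):
# determine_category('Rototellite') -> 'Rototellite'
# determine_category('CPA Indicator') -> 'CPA'
# determine_category('Fiber optics cable') -> 'Fiber Optics'
# determine_category(np.nan) -> 'Others' (non-string descriptions fall back to 'Others')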
#Include 'Total Quantity' in Pivot_table_scope from Pivot_table_completion otherwise set 'Total Quantity' = 'Critical Qty'
# Merge with Pivot_table_completion to get 'Total Quantity'
Pivot_table_scope = Pivot_table_scope.merge(
Pivot_table_completion[['Pty Indice', 'Total Quantity']],
on='Pty Indice',
how='left'
)
# Fill in 'Total Quantity' where missing with 'Critical Qty'
Pivot_table_scope['Total Quantity'] = Pivot_table_scope['Total Quantity'].fillna(Pivot_table_scope['Critical Qty'])
# Keep only relevant columns
Pivot_table_scope = Pivot_table_scope[['Pty Indice', 'Product Category', 'Critical Qty', 'Production Status', 'Total Quantity', 'Program']]
#print('Pivot_table_scope')
#display(Pivot_table_scope)
#################################################################################################
# Filter out 'Canceled' PN from aggregated data to exclude canceled orders from the scope
#################################################################################################
# Filter out rows where 'Production Status' is 'Canceled' OR 'Critical Qty' is 'To be canceled'
Pivot_table_scope_filtered = Pivot_table_scope[
~((Pivot_table_scope['Production Status'] == 'Canceled') |
(Pivot_table_scope['Critical Qty'] == 'To be canceled'))
].copy() # Copy to avoid SettingWithCopyWarning when adding the ordered 'Product Category' below
######################################
#Ordering 'Product Category' from Pivot_table_scope_filtered: ISP, CPA, Lightplate, Rototellite, Others, Fiber Optics
#####################################
# Define the desired order for 'Product Category'
category_order = ['ISP', 'CPA', 'Lightplate', 'Rototellite', 'Others', 'Fiber Optics']
# Convert 'Product Category' to a categorical type with the specified order
Pivot_table_scope_filtered['Product Category'] = pd.Categorical(
Pivot_table_scope_filtered['Product Category'],
categories=category_order,
ordered=True
)
# Sort the DataFrame by 'Product Category'
Pivot_table_scope_filtered = Pivot_table_scope_filtered.sort_values(by='Product Category')
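# Illustrative sketch (hypothetical values): an ordered Categorical sorts by the declared
# category order rather than alphabetically, so 'ISP' sorts before 'CPA' here.
_demo_order = pd.Series(pd.Categorical(['Others', 'CPA', 'ISP'], categories=category_order, ordered=True))
# _demo_order.sort_values() -> ISP, CPA, Others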
#print('Pivot_table_scope_filtered')
#display(Pivot_table_scope_filtered)
#######################################
# Aggregation on 'Product Category' and 'program', summing 'Total Quantity'
Pivot_table_scope_filtered_aggregated = Pivot_table_scope_filtered.groupby(['Product Category', 'Program'])['Total Quantity'].sum().reset_index()
#print('Pivot_table_scope_filtered_aggregated')
#display(Pivot_table_scope_filtered_aggregated)
# Aggregate data by Program and Product Category - Filtered to exclude canceled orders
aggregation_by_product_category = Pivot_table_scope_filtered.groupby(['Program', 'Product Category']) \
.agg(Pty_Indice_Count_Product_Category=('Pty Indice', 'nunique')) \
.reset_index()
# Aggregate data by Program and Production Status - Not filtered to include canceled orders
aggregation_by_production_status = Pivot_table_scope.groupby(['Program', 'Production Status']) \
.agg(Pty_Indice_Count_Production_Status=('Pty Indice', 'nunique')) \
.reset_index()
# Ordering aggregation_by_production_status by 'Production Status': Completed, Industrialized, FTB WIP, Proto WIP, FTB, Proto + FTB, To be transferred, Canceled, 'Officially transferred'
# Define the desired order for 'Production Status'
status_order = ['Completed', 'Industrialized', 'FTB WIP', 'Proto WIP', 'FTB', 'Proto + FTB', 'To be transferred', 'Canceled', 'Officially transferred']
# Convert 'Production Status' to a categorical type with the specified order
aggregation_by_production_status['Production Status'] = pd.Categorical(
aggregation_by_production_status['Production Status'],
categories=status_order,
ordered=True
)
# Sort the DataFrame by 'Production Status'
aggregation_by_production_status = aggregation_by_production_status.sort_values(by='Production Status')
#########################
# Print aggregated data
#########################
#print("Aggregated Data by Product Category - With redlist:")
#display(aggregation_by_product_category)
#print("Aggregated Data by Production Status - With redlist:")
#display(aggregation_by_production_status)
#New 08/14
#####################################################################################################################################
#### Include 'Combined PN' representing the full Transfer Project
## Pivot_table_completion contains only shipped PN; all the other PN are therefore at 0%
## Get the other PN from CM-Priority (filtering out 'Canceled') --> Pivot_table_scope_filtered does not contain the 'Canceled' PN but does contain 'To be transferred'
####################################################################################################################################
# Merge Pivot_table_completion with Pivot_table_scope_filtered on 'Pty Indice' to get the '% Completion Total Qty' and '% Completion Critical Qty'
# Select only the relevant columns from Pivot_table_completion
completion_filtered = Pivot_table_completion[['Priority', 'Pty Indice', 'Qty Shipped', '% Completion Total Qty', '% Completion Critical Qty']]
# Merge to keep the columns from Pivot_table_scope_filtered and include the missing ones from Pivot_table_completion
Pivot_table_completion_upated = pd.merge(Pivot_table_scope_filtered, completion_filtered, on='Pty Indice', how='left')
# Set 'NaN' from '% Completion Total Qty' and '% Completion Critical Qty' to 0
Pivot_table_completion_upated['% Completion Total Qty'] = Pivot_table_completion_upated['% Completion Total Qty'].fillna(0)
Pivot_table_completion_upated['% Completion Critical Qty'] = Pivot_table_completion_upated['% Completion Critical Qty'].fillna(0)
Pivot_table_completion_upated['Qty Shipped'] = Pivot_table_completion_upated['Qty Shipped'].fillna(0)
# Create a Priority mapping from CM-Priority to Pivot_table_completion_upated on 'Pty Indice' for rows where 'Priority' is NaN
priority_mapping = df_Priority.set_index('Pty Indice')['Priority']
#Identify rows with NaN Priority
missing_priority_mask = Pivot_table_completion_upated['Priority'].isna()
#Apply the Priority Mapping only to rows with NaN Priority
Pivot_table_completion_upated.loc[missing_priority_mask, 'Priority'] = Pivot_table_completion_upated.loc[missing_priority_mask, 'Pty Indice'].map(priority_mapping)
# Identify string values in 'Priority' column
string_mask = Pivot_table_completion_upated['Priority'].apply(lambda x: isinstance(x, str))
# Replace string values with 999
Pivot_table_completion_upated.loc[string_mask, 'Priority'] = 999
# Convert columns from float to integer
Pivot_table_completion_upated['Priority'] = Pivot_table_completion_upated['Priority'].astype(int)
Pivot_table_completion_upated['Total Quantity'] = Pivot_table_completion_upated['Total Quantity'].astype(int)
Pivot_table_completion_upated['Qty Shipped'] = Pivot_table_completion_upated['Qty Shipped'].astype(int)
# Round the percentage columns to one decimal place
Pivot_table_completion_upated['% Completion Total Qty'] = Pivot_table_completion_upated['% Completion Total Qty'].round(1)
Pivot_table_completion_upated['% Completion Critical Qty'] = Pivot_table_completion_upated['% Completion Critical Qty'].round(1)
# Create an empty list to store the new rows
new_rows = []
# Group by 'Program'
grouped = Pivot_table_completion_upated.groupby('Program')
# Calculate aggregated values for each group
for program, group in grouped:
total_quantity_sum = group['Total Quantity'].sum()
total_shipped_sum = group['Qty Shipped'].sum()
#average_completion_total_qty = group['% Completion Total Qty'].mean()
#average_completion_critical_qty = group['% Completion Critical Qty'].mean()
total_critical_qty = group['Critical Qty'].sum()
# Calculate completion percentages
# Handle division by zero cases by checking if total values are greater than zero
completion_total_qty = (total_shipped_sum / total_quantity_sum) * 100 if total_quantity_sum > 0 else 0
completion_critical_qty = (total_shipped_sum / total_critical_qty) * 100 if total_critical_qty > 0 else 0
# Create a new row for each program
new_row = {
'Pty Indice': f'Combined PN {program}',
'Priority': 98,
'Program': program,
'Product Category': 'Made up row, combined PN',
'Qty Shipped': total_shipped_sum,
'Critical Qty': total_critical_qty,
'Total Quantity': total_quantity_sum,
'% Completion Total Qty': completion_total_qty,
'% Completion Critical Qty': completion_critical_qty,
'Production Status': 'Made up row, combined PN'
}
# Append the new row to the list
new_rows.append(new_row)
# Convert the list of new rows into a DataFrame
new_rows_df = pd.DataFrame(new_rows)
# Append the new rows to the existing DataFrame
Pivot_table_completion_upated_combinedPN = pd.concat([Pivot_table_completion_upated, new_rows_df], ignore_index=True)
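# Note: the per-program loop above could also be written as a vectorized aggregation;
# a minimal sketch (same column names assumed, not wired into the dashboard):
#
# _combined = (Pivot_table_completion_upated
#              .groupby('Program', as_index=False)
#              .agg({'Total Quantity': 'sum', 'Qty Shipped': 'sum', 'Critical Qty': 'sum'}))
# _combined['% Completion Total Qty'] = np.where(_combined['Total Quantity'] > 0,
#     _combined['Qty Shipped'] / _combined['Total Quantity'] * 100, 0)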
# Round the percentage columns to one decimal place
Pivot_table_completion_upated_combinedPN['% Completion Total Qty'] = Pivot_table_completion_upated_combinedPN['% Completion Total Qty'].round(1)
Pivot_table_completion_upated_combinedPN['% Completion Critical Qty'] = Pivot_table_completion_upated_combinedPN['% Completion Critical Qty'].round(1)
# Sort the DataFrame by 'Priority' and 'Pty Indice' - update 08/28
#Pivot_table_completion_upated = Pivot_table_completion_upated.sort_values(by='Priority')
#Pivot_table_completion_upated_combinedPN = Pivot_table_completion_upated_combinedPN.sort_values(by='Priority')
Pivot_table_completion_upated = Pivot_table_completion_upated.sort_values(by=['Priority', 'Pty Indice'])
Pivot_table_completion_upated_combinedPN = Pivot_table_completion_upated_combinedPN.sort_values(by=['Priority', 'Pty Indice'])
#print('Pivot_table_completion_upated')
#display(Pivot_table_completion_upated)
#print('Pivot_table_completion_upated_combinedPN')
#display(Pivot_table_completion_upated_combinedPN)
#New 08/14
###############################################################################################################
# Create new DataFrame Pourcentage_distribution based on Pivot_table_scope_filtered (canceled orders excluded)
###############################################################################################################
#Copy Pivot_table_scope_filtered
Pourcentage_distribution = Pivot_table_scope_filtered.copy()
#print('Pourcentage_distribution')
#display(Pourcentage_distribution)
# Function to round numeric columns except the 'Program' column
def round_except_program(df, decimals=0, int_columns=None):
# Select columns to round (exclude 'Program')
numeric_columns = df.columns[df.columns != 'Program']
if int_columns:
# Convert specified columns to integer
df[int_columns] = df[int_columns].astype(int)
# Round remaining columns
df[numeric_columns] = df[numeric_columns].round(decimals)
return df
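# Illustrative sketch (hypothetical values) of round_except_program: 'Program' is untouched,
# 'Completed' is cast to int, and the remaining numeric columns are rounded.
_demo_round = pd.DataFrame({'Program': ['A'], 'Completed': [3.0], 'FTB': [1.44]})
_demo_round = round_except_program(_demo_round, decimals=1, int_columns=['Completed'])
# -> Program 'A', Completed 3, FTB 1.4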
#########################################
# Group TOTAL Qty by 'Production Status'
##########################################
# Group by 'Program' and 'Production Status'
status_distribution = Pourcentage_distribution.groupby(['Program', 'Production Status'])['Total Quantity'].sum().reset_index()
# Calculate percentage
status_total = status_distribution.groupby('Program')['Total Quantity'].transform('sum')
status_distribution['Percentage'] = (status_distribution['Total Quantity'] / status_total) * 100
# Pivot the table
status_distribution_pivot = status_distribution.pivot_table(
index='Program',
columns='Production Status',
values='Total Quantity',
aggfunc='sum',
fill_value=0
)
status_percentage_pivot = status_distribution.pivot_table(
index='Program',
columns='Production Status',
values='Percentage',
aggfunc='sum',
fill_value=0
)
# Resetting index for clarity
status_distribution_pivot = status_distribution_pivot.reset_index()
status_percentage_pivot = status_percentage_pivot.reset_index()
# Apply the function
#status_distribution_pivot = round_except_program(status_distribution_pivot, int_columns=['Completed', 'FTB', 'FTB WIP', 'Industrialized', 'Proto + FTB', 'Proto WIP', 'To be transferred', 'Officially transferred'])
status_distribution_pivot = round_except_program(status_distribution_pivot, int_columns=['Completed', 'FTB', 'FTB WIP', 'Industrialized', 'Proto + FTB', 'Proto WIP', 'Officially transferred'])
status_percentage_pivot = round_except_program(status_percentage_pivot, decimals=1)
#print
#print('status_distribution _pivot')
#display(status_distribution_pivot)
#print('status_percentage_pivot')
#display(status_percentage_pivot)
#####################################
# Group TOTAL Qty by 'Product Category'
#####################################
# Group by 'Program' and 'Product Category'
category_distribution = Pourcentage_distribution.groupby(['Program', 'Product Category'])['Total Quantity'].sum().reset_index()
# Calculate percentage
category_total = category_distribution.groupby('Program')['Total Quantity'].transform('sum')
category_distribution['Percentage'] = (category_distribution['Total Quantity'] / category_total) * 100
# Pivot the table
category_distribution_pivot = category_distribution.pivot_table(
index='Program',
columns='Product Category',
values='Total Quantity',
aggfunc='sum',
fill_value=0
)
category_percentage_pivot = category_distribution.pivot_table(
index='Program',
columns='Product Category',
values='Percentage',
aggfunc='sum',
fill_value=0
)
# Resetting index for clarity
category_distribution_pivot = category_distribution_pivot.reset_index()
category_percentage_pivot = category_percentage_pivot.reset_index()
# Apply the function for rounding
category_distribution_pivot = round_except_program(category_distribution_pivot, int_columns=['ISP', 'CPA', 'Lightplate', 'Rototellite', 'Others', 'Fiber Optics'])
category_percentage_pivot = round_except_program(category_percentage_pivot, decimals=1)
# print
#print('category_distribution_pivot')
#display(category_distribution_pivot)
############################################################################################################################
# Melt category_percentage_pivot & status_percentage_pivot - For Chart 4 - Distribution by TOTAL Quantity
###############################################################################################################################
# Melt the DataFrame to long format
df_melted_status_percentage_pivot = status_percentage_pivot.melt(id_vars=['Program'], var_name='Production Status', value_name='Percentage Status')
df_melted_category_percentage_pivot = category_percentage_pivot.melt(id_vars=['Program'], var_name='Product Category', value_name='Percentage Status')
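# Illustrative sketch (hypothetical values): melt turns one row per Program with one column per
# status into long format with one row per (Program, status) pair, which the hbar charts consume below.
_demo_wide = pd.DataFrame({'Program': ['A'], 'Completed': [60.0], 'FTB': [40.0]})
_demo_long = _demo_wide.melt(id_vars=['Program'], var_name='Production Status', value_name='Percentage Status')
# -> two rows: ('A', 'Completed', 60.0) and ('A', 'FTB', 40.0)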
#print('df_melted_status_percentage_pivot')
#display(df_melted_status_percentage_pivot)
# New 08/16
#########################################################
# - For Chart 5 - Distribution by UNIQUE Top-Level
########################################################
#Copy Pivot_table_scope_filtered
Pourcentage_distribution_Unique = Pivot_table_scope_filtered.copy()
##############################
# Group by UNIQUE 'Production Status'
###############################
# Group by 'Program' and 'Production Status'
status_distribution_unique = Pourcentage_distribution_Unique.groupby(['Program', 'Production Status'])['Pty Indice'].nunique().reset_index()
# Calculate total unique count for each Program
status_total_unique = status_distribution_unique.groupby('Program')['Pty Indice'].transform('sum')
# Calculate percentage
status_distribution_unique['Percentage'] = (status_distribution_unique['Pty Indice'] / status_total_unique) * 100
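# Illustrative sketch (hypothetical values): nunique counts each 'Pty Indice' once per group,
# so a Top-Level appearing on several rows still contributes 1 to the unique distribution.
_demo_unique = pd.DataFrame({'Program': ['A', 'A', 'A'], 'Pty Indice': ['P1', 'P1', 'P2']})
# _demo_unique.groupby('Program')['Pty Indice'].nunique() -> A: 2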
# Pivot the table
status_distribution_unique_pivot = status_distribution_unique.pivot_table(
index='Program',
columns='Production Status',
values='Pty Indice',
aggfunc='sum',
fill_value=0
)
status_percentage_unique_pivot = status_distribution_unique.pivot_table(
index='Program',
columns='Production Status',
values='Percentage',
aggfunc='sum',
fill_value=0
)
# Resetting index for clarity
status_distribution_unique_pivot = status_distribution_unique_pivot.reset_index()
status_percentage_unique_pivot = status_percentage_unique_pivot.reset_index()
# Apply the function for rounding
status_distribution_unique_pivot = round_except_program(status_distribution_unique_pivot, int_columns=['Completed', 'FTB', 'FTB WIP', 'Industrialized', 'Proto + FTB', 'Proto WIP', 'Officially transferred'])
status_percentage_unique_pivot = round_except_program(status_percentage_unique_pivot, decimals=1)
# print
#print('status_percentage_unique_pivot')
#display(status_percentage_unique_pivot)
##############################
# Group by UNIQUE 'Product Category'
###############################
# Group by 'Program' and 'Product Category'
category_distribution_unique = Pourcentage_distribution_Unique.groupby(['Program', 'Product Category'])['Pty Indice'].nunique().reset_index()
# Calculate total unique count for each Program
category_total_unique = category_distribution_unique.groupby('Program')['Pty Indice'].transform('sum')
# Calculate percentage
category_distribution_unique['Percentage'] = (category_distribution_unique['Pty Indice'] / category_total_unique) * 100
# Pivot the table
category_distribution_unique_pivot = category_distribution_unique.pivot_table(
index='Program',
columns='Product Category',
values='Pty Indice',
aggfunc='sum',
fill_value=0
)
category_percentage_unique_pivot = category_distribution_unique.pivot_table(
index='Program',
columns='Product Category',
values='Percentage',
aggfunc='sum',
fill_value=0
)
# Resetting index for clarity
category_distribution_unique_pivot = category_distribution_unique_pivot.reset_index()
category_percentage_unique_pivot = category_percentage_unique_pivot.reset_index()
# Apply the function for rounding
category_distribution_unique_pivot = round_except_program(category_distribution_unique_pivot, int_columns=['ISP', 'CPA', 'Lightplate', 'Rototellite', 'Others', 'Fiber Optics'])
category_percentage_unique_pivot = round_except_program(category_percentage_unique_pivot, decimals=1)
# print
#print('category_percentage_unique_pivot')
#display(category_percentage_unique_pivot)
############################################################################################################################
# Melt category_percentage_pivot & status_percentage_pivot - For Chart 5 - Distribution by UNIQUE Top-Level
###############################################################################################################################
# Melt the DataFrame to long format
#df_melted_status_percentage_pivot_UNIQUE = status_percentage_unique_pivot.melt(id_vars=['Program'], var_name='Production Status Unique', value_name='Percentage Status Unique')
#df_melted_category_percentage_pivot = category_distribution_unique_pivot.melt(id_vars=['Program'], var_name='Product Category Unique', value_name='Percentage Status Unique')
#print('df_melted_status_percentage_pivot_UNIQUE')
#display(df_melted_status_percentage_pivot_UNIQUE)
# Update on 18/16 to include Crossfiltering
#//////////////////////////////////////////////////#//////////////////////////////////////////////////#/////////////////////////////////////
##########################################################################################################################################
# Chart 1 - Percentage distribution of Product Categories --> Based on dataframe 'aggregation_by_product_category'
# Represents the number of unique IDD Top-Levels in each category
##########################################################################################################################################
# Chart 2 - Percentage distribution of unique Top-Level by Product Category --> Based on dataframe 'Pivot_table_scope_filtered_aggregated'
# Represents each category based on the total quantity to build of the Pty Indice included in that category
##########################################################################################################################################
# Chart 3 - Percentage distribution of Production Status --> Based on dataframe 'aggregation_by_production_status'
# Represents each Production Status based on the count of unique Pty Indice
##########################################################################################################################################
#//////////////////////////////////////////////////#//////////////////////////////////////////////////#/////////////////////////////////////
#########################################################################################
#//////////////////////////////////////////////////#////////////////////////////////////
# Bar charts of product categories distribution and production statuses distribution
#########################################################################################
#//////////////////////////////////////////////////#////////////////////////////////////
#Update 08/20
# Define colors mapping if used
colors_palette = {
'CPA': '#a2d5d6',
'ISP': '#64179d',
'Lightplate': '#dfddda',
'Others': '#cfa8cf',
'Rototellite': '#233b3f',
#'Readout ASSY':'#ea9770',
'Fiber Optics':'#ea9770',
'Completed': 'green',
'FTB': '#9EC0F6',
'FTB WIP': '#DAEEF3',
'Industrialized': '#a7d0ac',
'Proto + FTB': '#6199ea',
'Proto WIP': '#00d3ff',
'To be transferred':'#F2DCDB',
'Officially transferred':'#FF7A5B',
'Canceled': '#F35757',
}
# Update 10/09 to remove border from the bars
def customize_distribution_plot(bokeh_plot):
""" Apply customizations to the Total Quantity plot. """
bokeh_plot.xaxis.major_label_text_font_size = '10pt'
bokeh_plot.yaxis.major_label_text_font_size = '10pt'
#bokeh_plot.title.text_font_size = '12pt' # removed 10/07 to comply with the use of 'Div'
#bokeh_plot.title.text_color = "#305496" # removed 10/07 to comply with the use of 'Div'
bokeh_plot.xaxis.axis_line_width = 2
bokeh_plot.yaxis.axis_line_width = 2
#bokeh_plot.xaxis.major_label_orientation = 'horizontal' # superseded by the 45-degree rotation below
bokeh_plot.xaxis.major_label_orientation = 45
bokeh_plot.yaxis.major_label_orientation = 'horizontal'
#bokeh_plot.xaxis.major_label_text_font_style = 'bold' # Make x-axis labels bold
bokeh_plot.yaxis.axis_label_text_font_size = '10pt'
bokeh_plot.ygrid.grid_line_color = '#E0E0E0'
bokeh_plot.ygrid.grid_line_dash = [4, 6]
bokeh_plot.toolbar.logo = None
return bokeh_plot
###########################
# Bar chart 1
###########################
def create_product_category(aggregation_by_product_category, program):
# Filter data by the default program
filtered_data = aggregation_by_product_category[aggregation_by_product_category['Program'] == program]
if filtered_data.empty:
print("No data found for the default program.")
return None
# Sort the filtered data by 'Pty_Indice_Count_Product_Category'
filtered_data = filtered_data.sort_values(by='Pty_Indice_Count_Product_Category')
# Create the plot
product_category_plot = filtered_data.hvplot.bar(
x='Product Category',
y='Pty_Indice_Count_Product_Category',
title=None, # Remove the built-in title
xlabel='Product Category',
ylabel='Nb of unique Top-Level',
cmap=colors_palette,
color='Product Category',
legend='top_left',
bar_width=0.6, # Set bar width
tools=[]
)
# Render the plot to Bokeh
bokeh_product_category_plot = hv.render(product_category_plot, backend='bokeh')
# Set explicit dimensions for the Bokeh plot
bokeh_product_category_plot.width = 400
bokeh_product_category_plot.height = 600
# Apply customizations
bokeh_product_category_plot = customize_distribution_plot(bokeh_product_category_plot)
#New 10/09
# Remove borders from the bars inside the 'create_product_category' function
for renderer in bokeh_product_category_plot.renderers:
if isinstance(renderer.glyph, VBar): # Check if the renderer is a VBar
renderer.glyph.line_color = None # Remove the borders from the bars
# Create the HTML formatted title using Div
title_text = """<span style="font-size: 16px; color: #305496; margin-left: 60px;">
<b>Unique</b> Top-Level <b>Quantity</b> by Product Category
</span>"""
title_product_category_plot = Div(text=title_text)
#####################################################
# Remove existing HoverTools (if any) before adding a new one
bokeh_product_category_plot.tools = [tool for tool in bokeh_product_category_plot.tools if not isinstance(tool, HoverTool)]
# Add HoverTool with custom formatting
hover = HoverTool()
hover.tooltips = [
("Product Category", "@Product_Category"),
("Unique PN", "@Pty_Indice_Count_Product_Category")
]
# Add HoverTool to the plot
bokeh_product_category_plot.add_tools(hover)
# Remove wheel zoom from active tools if you want it inactive by default
bokeh_product_category_plot.tools = [tool for tool in bokeh_product_category_plot.tools if not isinstance(tool, WheelZoomTool)]
############################################################
# Combine title and plot into a Panel Column layout
product_category_plot_layout = pn.Column(title_product_category_plot, bokeh_product_category_plot)
return product_category_plot_layout
def update_plots_product_category(event):
# Get the selected program from the widget
#program = program_widget_historic.value
program = event.new if event else program_widget_historic.value
print(f"Event new value: {program}") # 01/09/25 - use 'program' so this also works when event is None
# Filter data by the selected program
filtered_data = aggregation_by_product_category[aggregation_by_product_category['Program'] == program]
if filtered_data.empty:
print("No data found for the selected program.")
return
# Update plots
product_category_plot_layout = create_product_category(filtered_data, program)
# Update the plots in the Panel layout
bar_plot_pane1.object = product_category_plot_layout
program_widget_historic.param.watch(update_plots_product_category, 'value')
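# Minimal sketch of the watch pattern used throughout this section (hypothetical widget,
# not part of the dashboard): the callback receives an event whose .new holds the new value.
#
# _w = pn.widgets.Select(name='Program', options=['A', 'B'])
# def _on_change(event):
#     print(event.old, '->', event.new)
# _w.param.watch(_on_change, 'value')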
#######################################################
# Bar chart 2 - Pivot_table_scope_filtered_aggregated
########################################################
def create_product_category_total_qty(Pivot_table_scope_filtered_aggregated, program):
# Filter data by the default program
filtered_data = Pivot_table_scope_filtered_aggregated[Pivot_table_scope_filtered_aggregated['Program'] == program]
if filtered_data.empty:
print("No data found for the default program.")
return None
# Sort the filtered data by 'Total Quantity'
filtered_data = filtered_data.sort_values(by='Total Quantity')
# Create the plot
product_category_total_qty = filtered_data.hvplot.bar(
x='Product Category',
y='Total Quantity',
title=None, # Remove the built-in title
xlabel='Product Category',
ylabel='Total quantity of Top-Level',
cmap=colors_palette,
color='Product Category',
legend='top_left',
bar_width=0.6, # Set bar width
tools=[]
)
# Render the plot to Bokeh
bokeh_product_category_total_qty_plot = hv.render(product_category_total_qty, backend='bokeh')
# Set explicit dimensions for the Bokeh plot
bokeh_product_category_total_qty_plot.width = 400
bokeh_product_category_total_qty_plot.height = 600
# Apply customizations
bokeh_product_category_total_qty_plot = customize_distribution_plot(bokeh_product_category_total_qty_plot)
#New 10/09
# Remove borders from the bars inside the 'create_product_category_total_qty' function
for renderer in bokeh_product_category_total_qty_plot.renderers:
if isinstance(renderer.glyph, VBar): # Check if the renderer is a VBar
renderer.glyph.line_color = None # Remove the borders from the bars
# Create the HTML formatted title using Div
title_text = """<span style="font-size: 16px; color: #305496; margin-left: 60px;">
<b>Total</b> Top-Level <b>Quantity</b> by Product Category
</span>"""
title_product_category_total_qty_plot = Div(text=title_text)
# Remove existing HoverTools (if any) before adding a new one
bokeh_product_category_total_qty_plot.tools = [tool for tool in bokeh_product_category_total_qty_plot.tools if not isinstance(tool, HoverTool)]
# Add HoverTool with custom formatting
hover = HoverTool()
hover.tooltips = [
("Product Category", "@Product_Category"),
("Total Quantity", "@Total_Quantity")
]
# Add HoverTool to the plot
bokeh_product_category_total_qty_plot.add_tools(hover)
# Remove wheel zoom from active tools if you want it inactive by default
bokeh_product_category_total_qty_plot.tools = [tool for tool in bokeh_product_category_total_qty_plot.tools if not isinstance(tool, WheelZoomTool)]
# Combine title and plot into a Panel Column layout
product_category_total_qty_layout = pn.Column(title_product_category_total_qty_plot, bokeh_product_category_total_qty_plot)
return product_category_total_qty_layout
def update_plots_product_category_total_qty(event):
# Get the selected program from the widget
#program = program_widget_historic.value
program = event.new if event else program_widget_historic.value
# Filter data by the selected program
filtered_data = Pivot_table_scope_filtered_aggregated[Pivot_table_scope_filtered_aggregated['Program'] == program]
if filtered_data.empty:
print("No data found for the selected program.")
return
# Update plots
product_category_total_qty_layout = create_product_category_total_qty(filtered_data, program)
# Update the plots in the Panel layout
bar_plot_pane2.object = product_category_total_qty_layout
program_widget_historic.param.watch(update_plots_product_category_total_qty, 'value')
####################################################################
# Bar chart 3 - Production Status, Pty_Indice_Count_Production_Status
######################################################################
# 10/25 - use program as argument instead of default_program_historic
def create_production_status(aggregation_by_production_status, program):
# Filter data by the default program
filtered_data = aggregation_by_production_status[aggregation_by_production_status['Program'] == program]
if filtered_data.empty:
print("No data found for the default program.")
return None
# Sort the filtered data by 'Pty_Indice_Count_Production_Status'
filtered_data = filtered_data.sort_values(by='Pty_Indice_Count_Production_Status')
# Create the plot
production_status_plot = filtered_data.hvplot.bar(
x='Production Status',
y='Pty_Indice_Count_Production_Status',
title=None, # Remove the built-in title
xlabel='Production Status',
ylabel='Nb of unique Top-Level',
cmap=colors_palette,
color='Production Status',
legend='top_left',
bar_width=0.6,
tools=[]
)
# Create the HTML formatted title using Div
title_text = """<span style="font-size: 16px; color: #305496; margin-left: 60px;">
<b>Unique</b> Top-level <b>Quantity</b> by Production Status
</span>"""
title_production_status_plot = Div(text=title_text)
# Render the plot to Bokeh
bokeh_production_status_plot = hv.render(production_status_plot, backend='bokeh')
# Set explicit dimensions for the Bokeh plot
bokeh_production_status_plot.width = 600
bokeh_production_status_plot.height = 600
# Apply customizations
bokeh_production_status_plot = customize_distribution_plot(bokeh_production_status_plot)
#New 10/09
# Remove borders from the bars inside the 'create_production_status' function
for renderer in bokeh_production_status_plot.renderers:
if isinstance(renderer.glyph, VBar): # Check if the renderer is a VBar
renderer.glyph.line_color = None # Remove the borders from the bars
# Remove existing HoverTools and add a new one
bokeh_production_status_plot.tools = [tool for tool in bokeh_production_status_plot.tools if not isinstance(tool, HoverTool)]
hover = HoverTool()
hover.tooltips = [
("Production Status", "@Production_Status"),
("Unique PN", "@Pty_Indice_Count_Production_Status")
]
bokeh_production_status_plot.add_tools(hover)
# Remove wheel zoom from active tools
bokeh_production_status_plot.tools = [tool for tool in bokeh_production_status_plot.tools if not isinstance(tool, WheelZoomTool)]
# Combine title and plot into a Panel Column layout and return it
production_status_layout = pn.Column(title_production_status_plot, bokeh_production_status_plot)
return production_status_layout
def update_plots_production_status(event):
# Get the selected program from the widget
#program = program_widget_historic.value
# Get the selected program from the widget's value if event is None
program = event.new if event else program_widget_historic.value
# Filter aggregation_by_production_status by the selected program
filtered_data = aggregation_by_production_status[aggregation_by_production_status['Program'] == program]
# Check if the program has data in aggregation_by_production_status
if filtered_data.empty:
print("No data found for the selected program.")
return
# Update the production status plot layout, passing the full dataset and selected program
production_status_layout = create_production_status(aggregation_by_production_status, program)
# Update the bar plot pane with the new layout
bar_plot_pane3.object = production_status_layout
program_widget_historic.param.watch(update_plots_production_status, 'value')
#######################
# Create initial plots
########################
# Create the initial production status layout - New 10/07
product_category_plot_layout = create_product_category(aggregation_by_product_category, default_program_historic) # 09/01 This plot aggregation_by_product_category is not updated
product_category_total_qty_layout = create_product_category_total_qty(Pivot_table_scope_filtered_aggregated, default_program_historic)
production_status_layout = create_production_status(aggregation_by_production_status, default_program_historic) # OK
#Create Layout panes - New 10/07
bar_plot_pane1 = pn.panel(product_category_plot_layout, sizing_mode='fixed', height=600, width=400)
bar_plot_pane2 = pn.panel(product_category_total_qty_layout, sizing_mode='fixed', height=600, width=400)
bar_plot_pane3 = pn.panel(production_status_layout, sizing_mode='fixed', height=600, width=600) # Correct usage of panel - OK
######################################
# Create text below graphs
########################################
# Convert 'Last Update' to datetime format
df_Priority['Last Update'] = pd.to_datetime(df_Priority['Last Update'], format='%m/%d/%Y', errors='coerce')
# Format the date as a short date (MM-DD-YYYY)
df_Priority['Last Update'] = df_Priority['Last Update'].dt.strftime('%m-%d-%Y')
# Extract the single date value from the DataFrame
Date_CM_Priority = df_Priority['Last Update'].iloc[0]
text_below_product_category = pn.pane.HTML(
f"This graph is based on data from |CM-Priority| - <b>{Date_CM_Priority}</b>:<br>"
"▷ <b>Unique Top-Level Quantity </b>: Number of unique Part Number for each Category of Product.<br>"
"▷ <b>This graph includes</b>: All PN since the beginning of the transfer for the selected program included the 'redlist'.<br>"
"▷ <b>Redlist</b>: PN 'to be transferred' or 'Officially transferred', not yet 'Canceled' - Canceled order are filtered-out of this graph.<br>",
width=450 # Match the width of bar_plot_pane1 and bar_plot_pane2
)
text_below_product_category_total_qty = pn.pane.HTML(
f"This graph is based on data from |CM-Priority| - <b>{Date_CM_Priority}</b>:<br>"
"▷ <b>Total Quantity Top-Level</b>: Total Quantity of parts either in SEDA's Backlog for each category. The follow-up orders are included in this 'Total Quantity'.<br>"
"▷ <b>This graph includes</b>: All PN since the beginning of the transfer for the selected program included the 'redlist' (not yet canceled).<br>"
"▷ <b>Redlist</b>: PN 'to be transferred' or Officially transferred, not yet 'Canceled' - Canceled order are filtered-out of this graph.<br>",
width=450 # Match the width of bar_plot_pane2
)
text_below_production_status = pn.pane.HTML(
f"This graph is based on data from |CM-Priority| - <b>{Date_CM_Priority}</b>:<br>"
"▷ <b>Unique Quantity Top-Level </b>: Number of unique Part Number for each Category of Product.<br>"
"▷ <b>This graph includes</b>: All PN since the beginning of the transfer for the selected program included the 'redlist' & Canceled order.<br>",
width=600 # Match the width of bar_plot_pane3
)
#New 08/15
###########################################################################################################################################################################################
# Chart 4 - Percentage distribution of Product Category & Production Status --> Based on initial DataFrame 'Pourcentage_distribution'
# Represents the % of each product category and production status based on TOTAL Top-Level quantity --> DataFrames df_melted_status_percentage_pivot & df_melted_category_percentage_pivot
##########################################################################################################################################################################################
# Combination of DataFrames df_melted_status_percentage_pivot & df_melted_category_percentage_pivot in create_percentage_product_category_production_status
# Updated 09/17
def create_percentage_product_category_production_status(df_melted_status_percentage_pivot, df_melted_category_percentage_pivot, program, colors_palette):
# Filter data by the default program
filtered_data_status = df_melted_status_percentage_pivot[df_melted_status_percentage_pivot['Program'] == program].copy()
filtered_data_category = df_melted_category_percentage_pivot[df_melted_category_percentage_pivot['Program'] == program].copy()
# Ensure 'Percentage Status' is numeric, fill NaN values with 0
filtered_data_status['Percentage Status'] = pd.to_numeric(filtered_data_status['Percentage Status'], errors='coerce').fillna(0)
filtered_data_category['Percentage Status'] = pd.to_numeric(filtered_data_category['Percentage Status'], errors='coerce').fillna(0)
# Convert categorical columns to categorical type
filtered_data_status['Production Status'] = pd.Categorical(filtered_data_status['Production Status'])
filtered_data_category['Product Category'] = pd.Categorical(filtered_data_category['Product Category'])
# Sort data by 'Percentage Status' in descending order (largest first)
filtered_data_status = filtered_data_status.sort_values(by='Percentage Status', ascending=False)
filtered_data_category = filtered_data_category.sort_values(by='Percentage Status', ascending=False)
# Extract the categories and corresponding colors
categories_status = filtered_data_status['Production Status'].cat.categories
categories_category = filtered_data_category['Product Category'].cat.categories
# Ensure the palette covers all categories
colors_palette_status = [colors_palette.get(cat, '#808080') for cat in categories_status]
colors_palette_category = [colors_palette.get(cat, '#808080') for cat in categories_category]
# Convert DataFrame to ColumnDataSource
source_status = ColumnDataSource(filtered_data_status)
source_category = ColumnDataSource(filtered_data_category)
# HTML title for Production Status
title_text_status_distribution = """<span style="font-size: 16px; color: #305496; margin-left: 150px;">
Production Status <b>Distribution</b> based on <b>Total Quantity</b>
</span>"""
title_status_plot_distribution = Div(text=title_text_status_distribution)
# Create figure for Production Status
status_plot = figure(
y_range=FactorRange(*filtered_data_status['Production Status']),
x_axis_label='Percentage Production Status',
y_axis_label='Production Status',
x_range=(0, 100), # Set x-axis range from 0 to 100
title=None
)
# Rename columns just before configuring HoverTool
filtered_data_status_renamed = filtered_data_status.rename(columns={
'Production Status': 'Production_Status',
'Percentage Status': 'Percentage_Status'
})
source_status_renamed = ColumnDataSource(filtered_data_status_renamed)
# Remove existing hover tool (if any) and create a new one
status_plot.tools = [tool for tool in status_plot.tools if not isinstance(tool, HoverTool)]
hover_status = HoverTool(
tooltips=[
("Production Status", "@Production_Status"),
("Percentage", "@Percentage_Status{0.0f}%")
]
)
status_plot.add_tools(hover_status)
status_plot.hbar(
y='Production_Status',
right='Percentage_Status',
source=source_status_renamed,
height=0.6, # Thickness of each bar
color=factor_cmap('Production_Status', palette=colors_palette_status, factors=categories_status),
legend_field='Production_Status' # Add legend field
)
# Set fixed dimensions directly here
status_plot.width = 600
status_plot.height = 600
# HTML title for Product Category
title_text_category_distribution = """<span style="font-size: 16px; color: #305496; margin-left: 50px;">
Product Category <b>Distribution</b> based on <b>Total Quantity</b>
</span>"""
title_category_plot_distribution = Div(text=title_text_category_distribution)
# Create figure for Product Category
category_plot = figure(
y_range=FactorRange(*filtered_data_category['Product Category']),
x_axis_label='Percentage Product Category',
y_axis_label='Product Category',
x_range=(0, 100), # Set x-axis range from 0 to 100
title=None
)
# Rename columns just before configuring HoverTool
filtered_data_category_renamed = filtered_data_category.rename(columns={
'Product Category': 'Product_Category',
'Percentage Status': 'Percentage_Status'
})
source_category_renamed = ColumnDataSource(filtered_data_category_renamed)
# Remove existing hover tool (if any) and create a new one
category_plot.tools = [tool for tool in category_plot.tools if not isinstance(tool, HoverTool)]
hover_category = HoverTool(
tooltips=[
("Product Category", "@Product_Category"),
("Percentage", "@Percentage_Status{0.0f}%")
]
)
category_plot.add_tools(hover_category)
category_plot.hbar(
y='Product_Category',
right='Percentage_Status',
source=source_category_renamed,
height=0.6, # Thickness of each bar
color=factor_cmap('Product_Category', palette=colors_palette_category, factors=categories_category),
legend_field='Product_Category' # Add legend field
)
category_plot.legend.location = 'top_right' # Position the legend in the top right
status_plot.legend.location = 'top_right' # Position the legend in the top right
# Apply customization for consistency with other graphs
status_plot = customize_distribution_plot(status_plot)
category_plot = customize_distribution_plot(category_plot)
# Customize grid lines
status_plot.xgrid.grid_line_color = '#E0E0E0'
status_plot.xgrid.grid_line_dash = [4, 6] # Dash style for x-axis grid lines
status_plot.ygrid.grid_line_color = None # Remove y-axis grid lines
category_plot.xgrid.grid_line_color = '#E0E0E0'
category_plot.xgrid.grid_line_dash = [4, 6] # Dash style for x-axis grid lines
category_plot.ygrid.grid_line_color = None # Remove y-axis grid lines
# Set fixed dimensions directly here
category_plot.width = 450
category_plot.height = 600
# Arrange plots and titles in columns
status_layout_distribution = column(title_status_plot_distribution, status_plot)
category_layout_distribution = column(title_category_plot_distribution, category_plot)
#return status_plot_unique, category_plot_unique # Update 10/07
return status_layout_distribution, category_layout_distribution
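# Note on factor_cmap above: it assigns each factor of the renamed column the palette entry at
# the same position in its factors list, so the palettes built with colors_palette.get(...) must
# stay aligned with categories_status / categories_category.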
def update_plot_percentage_product_category_production_status(event):
program = program_widget_historic.value
# Filter data by the selected program
filtered_data_status = df_melted_status_percentage_pivot[df_melted_status_percentage_pivot['Program'] == program]
filtered_data_category = df_melted_category_percentage_pivot[df_melted_category_percentage_pivot['Program'] == program]
if filtered_data_status.empty or filtered_data_category.empty:
print("No data found for the selected program.")
return
# Get the plot layout
status_layout_distribution, category_layout_distribution = create_percentage_product_category_production_status(
df_melted_status_percentage_pivot,
df_melted_category_percentage_pivot,
program,
colors_palette # Make sure colors_palette is passed here
)
# Extract the actual plot (assuming it's the second child in the layout)
bokeh_status_plot = status_layout_distribution.children[1] # Accessing the second child (the plot)
bokeh_category_plot = category_layout_distribution.children[1] # Accessing the second child (the plot)
# Customize grid lines
#bokeh_status_plot.xgrid.grid_line_color = '#E0E0E0'
#bokeh_status_plot.xgrid.grid_line_dash = [4, 6] # Dash style for x-axis grid lines
#bokeh_status_plot.ygrid.grid_line_color = None # Remove y-axis grid lines
#bokeh_category_plot.xgrid.grid_line_color = '#E0E0E0'
#bokeh_category_plot.xgrid.grid_line_dash = [4, 6] # Dash style for x-axis grid lines
#bokeh_category_plot.ygrid.grid_line_color = None # Remove y-axis grid lines
# Set fixed dimensions
bokeh_status_plot.width = 600
bokeh_status_plot.height = 600
bokeh_category_plot.width = 450
bokeh_category_plot.height = 600
# Update Bokeh plot panes
bar_plot_pane_status_percentage.object = bokeh_status_plot
bar_plot_pane_category_percentage.object = bokeh_category_plot
#######################
# Create initial plots
########################
bokeh_status_plot, bokeh_category_plot = create_percentage_product_category_production_status(
df_melted_status_percentage_pivot,
df_melted_category_percentage_pivot,
default_program_historic,
colors_palette # Make sure colors_palette is passed here
)
# Create Bokeh plot panes with fixed dimensions
bar_plot_pane_status_percentage = pn.pane.Bokeh(bokeh_status_plot, sizing_mode='fixed', height=600, width=600)
bar_plot_pane_category_percentage = pn.pane.Bokeh(bokeh_category_plot, sizing_mode='fixed', height=600, width=450)
#update_plot_percentage_product_category_production_status(None)
####################################################
# Watch the widget and update the plot on change
####################################################
program_widget_historic.param.watch(update_plot_percentage_product_category_production_status, 'value')
######################################################################################################################################################################################
# Chart 5 - Percentage distribution of Product Category & Production Status --> Based on initial DataFrame 'Pourcentage_distribution_Unique'
# Represents the % of each product category and production status based on UNIQUE Top-Level counts --> DataFrames status_percentage_unique_pivot & category_percentage_unique_pivot
#######################################################################################################################################################################################
# Updated 09/17
def create_percentage_product_category_production_status_UNIQUE(status_percentage_unique_pivot, category_percentage_unique_pivot, program, colors_palette):
#program = default_program_historic
# Filter data by the default program
filtered_data_status = status_percentage_unique_pivot[status_percentage_unique_pivot['Program'] == program]
filtered_data_category = category_percentage_unique_pivot[category_percentage_unique_pivot['Program'] == program]
if filtered_data_status.empty or filtered_data_category.empty:
print("No data found for the default program.")
return None, None
# Transform filtered_data_status into the desired format
transformed_status = filtered_data_status.melt(
id_vars=['Program'],
var_name='Production Status',
value_name='Percentage'
)
# Transform filtered_data_category into the desired format
transformed_category = filtered_data_category.melt(
id_vars=['Program'],
var_name='Product Category',
value_name='Percentage'
)
# Ensure 'Percentage' is numeric, fill NaN values with 0
transformed_status['Percentage'] = pd.to_numeric(transformed_status['Percentage'], errors='coerce').fillna(0)
transformed_category['Percentage'] = pd.to_numeric(transformed_category['Percentage'], errors='coerce').fillna(0)
# Sort data by 'Percentage' in descending order
transformed_status = transformed_status.sort_values(by='Percentage', ascending=False)
transformed_category = transformed_category.sort_values(by='Percentage', ascending=False)
# Convert categorical columns to categorical type
transformed_status['Production Status'] = pd.Categorical(transformed_status['Production Status'])
transformed_category['Product Category'] = pd.Categorical(transformed_category['Product Category'])
# Extract the categories and corresponding colors
categories_status = transformed_status['Production Status'].cat.categories
categories_category = transformed_category['Product Category'].cat.categories
# Ensure the palette covers all categories
colors_palette_status = [colors_palette.get(cat, '#808080') for cat in categories_status]
colors_palette_category = [colors_palette.get(cat, '#808080') for cat in categories_category]
# Convert DataFrame to ColumnDataSource
source_status = ColumnDataSource(transformed_status)
source_category = ColumnDataSource(transformed_category)
# Rename columns for hover tool compatibility
transformed_status_renamed = transformed_status.rename(columns={
'Production Status': 'Production_Status',
'Percentage': 'Percentage_Status'
})
transformed_category_renamed = transformed_category.rename(columns={
'Product Category': 'Product_Category',
'Percentage': 'Percentage_Status'
})
# Convert renamed DataFrames to ColumnDataSource
source_status_renamed = ColumnDataSource(transformed_status_renamed)
source_category_renamed = ColumnDataSource(transformed_category_renamed)
# HTML title for Production Status - margin-left creates a gap before the title
title_text_status = """<span style="font-size: 16px; color: #305496; margin-left: 130px;">
Production Status <b>Distribution</b> based on <b>Unique</b> Top-Level
</span>"""
title_status_plot_unique = Div(text=title_text_status)
# Create figure for Production Status without title
status_plot_unique = figure(
y_range=FactorRange(*transformed_status['Production Status']),
x_axis_label='Percentage Production Status',
y_axis_label='Production Status',
x_range=(0, 100),
title=None
)
# Remove existing hover tool (if any) and create a new one
status_plot_unique.tools = [tool for tool in status_plot_unique.tools if not isinstance(tool, HoverTool)]
hover_status = HoverTool(
tooltips=[
("Production Status", '@Production_Status'),
("Percentage", "@Percentage_Status{0.0f}%")
]
)
status_plot_unique.add_tools(hover_status)
status_plot_unique.hbar(
y='Production_Status',
right='Percentage_Status',
source=source_status_renamed,
height=0.6, # Thickness of each bar
color=factor_cmap('Production_Status', palette=colors_palette_status, factors=categories_status),
legend_field='Production_Status' # Add legend field
)
# Set fixed dimensions directly here
status_plot_unique.width = 600
status_plot_unique.height = 600
# HTML title for Product Category
title_text_category = """<span style="font-size: 16px; color: #305496; margin-left: 20px;">
Product Category <b>Distribution</b> based on <b>Unique</b> Top-Level
</span>"""
title_category_plot_unique = Div(text=title_text_category)
# Create figure for Product Category
category_plot_unique = figure(
y_range=FactorRange(*transformed_category['Product Category']),
x_axis_label='Percentage Product Category',
y_axis_label='Product Category',
x_range=(0, 100),
title=None
)
# Remove existing hover tool (if any) and create a new one
category_plot_unique.tools = [tool for tool in category_plot_unique.tools if not isinstance(tool, HoverTool)]
hover_category = HoverTool(
tooltips=[
("Product Category", '@Product_Category'),
("Percentage", "@Percentage_Status{0.0f}%")
]
)
category_plot_unique.add_tools(hover_category)
category_plot_unique.hbar(
y='Product_Category',
right='Percentage_Status',
source=source_category_renamed,
height=0.6, # Thickness of each bar
color=factor_cmap('Product_Category', palette=colors_palette_category, factors=categories_category),
legend_field='Product_Category' # Add legend field
)
# Set fixed dimensions directly here
category_plot_unique.width = 450
category_plot_unique.height = 600
# Set legend position
category_plot_unique.legend.location = 'top_right' # Position the legend in the top right
status_plot_unique.legend.location = 'top_right' # Position the legend in the top right
# Apply customization for consistency with other graphs
status_plot_unique = customize_distribution_plot(status_plot_unique)
category_plot_unique = customize_distribution_plot(category_plot_unique)
# Customize grid lines
status_plot_unique.xgrid.grid_line_color = '#E0E0E0'
status_plot_unique.xgrid.grid_line_dash = [4, 6] # Dash style for x-axis grid lines
status_plot_unique.ygrid.grid_line_color = None # Remove y-axis grid lines
category_plot_unique.xgrid.grid_line_color = '#E0E0E0'
category_plot_unique.xgrid.grid_line_dash = [4, 6] # Dash style for x-axis grid lines
category_plot_unique.ygrid.grid_line_color = None # Remove y-axis grid lines
# Arrange plots and titles in columns
status_layout_unique = column(title_status_plot_unique, status_plot_unique)
category_layout_unique = column(title_category_plot_unique, category_plot_unique)
#return status_plot_unique, category_plot_unique # Update 10/07
return status_layout_unique, category_layout_unique
# Update function for widget changes
def update_plot_percentage_product_category_production_status_UNIQUE(event):
program = program_widget_historic.value # Fetch selected program
# Filter data
filtered_data_status = status_percentage_unique_pivot[status_percentage_unique_pivot['Program'] == program]
if filtered_data_status.empty:
print(f"No data found for the selected program: {program}")
return
# Create updated plots
bokeh_status_unique_plot, bokeh_category_unique_plot = create_percentage_product_category_production_status_UNIQUE(
status_percentage_unique_pivot,
category_percentage_unique_pivot,
program,
colors_palette
)
# Update Bokeh plot panes
bar_plot_pane_status_unique.object = bokeh_status_unique_plot
bar_plot_pane_category_unique.object = bokeh_category_unique_plot
#######################
# Create initial plots
######################
# Create initial plots --> 2 distinct plots
bokeh_status_unique_plot, bokeh_category_unique_plot = create_percentage_product_category_production_status_UNIQUE(status_percentage_unique_pivot, category_percentage_unique_pivot, default_program_historic, colors_palette)
# Create Bokeh plots panes with fixed dimensions
bar_plot_pane_status_unique = pn.pane.Bokeh(bokeh_status_unique_plot, sizing_mode='fixed', height=600, width=600)
bar_plot_pane_category_unique = pn.pane.Bokeh(bokeh_category_unique_plot, sizing_mode='fixed', height=600, width=450)
####################################################
# Watch the widget and update the plot on change
####################################################
program_widget_historic.param.watch(update_plot_percentage_product_category_production_status_UNIQUE, 'value')
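# Minimal sketch of the watcher contract (param API): the callback receives an Event
# object whose .old/.new attributes hold the previous and newly selected value, e.g.
#   def debug_watcher(event):
#       print(f"Program changed from {event.old} to {event.new}")
#   program_widget_historic.param.watch(debug_watcher, 'value')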
###############################
# Text below Chart 4
#############################
# Define text components with fixed widths to match plot widths
text_below_percentage_product_category = pn.pane.HTML(
f"These graphs are based on data from |CM-Priority| - <b>{Date_CM_Priority}</b>:<br>"
"▷ <b>% Product Category</b>: Percentage of each Product Categories based on the 'Total Quantity' of each PN.<br>"
"▷ <b>These Graphs includes</b>: All PN since the beginning of the transfer for the selected program included the 'redlist'.<br>",
width=450 # Match the width of bar_plot_pane4 which is 2 grpahs of 225
)
# Define text components with fixed widths to match plot widths
text_below_percentage_production_status = pn.pane.HTML(
f"These graphs are based on data from |CM-Priority| - <b>{Date_CM_Priority}</b>:<br>"
"▷ <b>% Production Status</b>: Percentage of each Production Statuses based on the 'Total Quantity' of each PN.<br>"
"▷ <b>These Graphs includes</b>: All PN since the beginning of the transfer for the selected program included the 'redlist'.<br>",
width=620 # Match the width of bar_plot_pane4 which is 2 grpahs of 225
)
####################################################################################
# Text below Chart 5 - bar_plot_pane_status_unique & bar_plot_pane_category_unique
#####################################################################################
# Define text components with fixed widths to match plot widths
text_below_percentage_product_category_UNIQUE = pn.pane.HTML(
f"These graphs are based on data from |CM-Priority| - <b>{Date_CM_Priority}</b>:<br>"
"▷ <b>% Product Category</b>: Percentage of each Product Categories based on the number of unique PN.<br>"
"▷ <b>These Graphs includes</b>: All PN since the beginning of the transfer for the selected program included the 'redlist'.<br>",
width=450 # Match the width of bar_plot_pane4 which is 2 grpahs of 225
)
# Define text components with fixed widths to match plot widths
text_below_percentage_production_status_UNIQUE = pn.pane.HTML(
f"These graphs are based on data from |CM-Priority| - <b>{Date_CM_Priority}</b>:<br>"
"▷ <b>% Production Status</b>: Percentage of each Production Statuses based on the number of unique PN.<br>"
"▷ <b>These Graphs includes</b>: All PN since the beginning of the transfer for the selected program included the 'redlist'.<br>",
width=650 # Match the width of bar_plot_pane4 which is 2 grpahs of 225
)
#//////////////////////////////////////////////////#//////////////////////////////////////////////////
#################################################################################################
# LAYOUT - Combine plots into a vertical Panel layout - Chart 1 to 5
####################################################################################################
#//////////////////////////////////////////////////#//////////////////////////////////////////////////
#Create vertical layouts for each plot and its corresponding text
plot1_layout = pn.Column(
bar_plot_pane1,
pn.Spacer(height=30),
text_below_product_category
)
plot2_layout = pn.Column(
bar_plot_pane2,
pn.Spacer(height=30),
text_below_product_category_total_qty
)
plot3_layout = pn.Column(
bar_plot_pane3,
pn.Spacer(height=30),
text_below_production_status
)
plot_category_percentage = pn.Column(
bar_plot_pane_category_percentage,
pn.Spacer(height=30),
text_below_percentage_product_category
)
plot_status_percentage = pn.Column(
bar_plot_pane_status_percentage,
pn.Spacer(height=30),
text_below_percentage_production_status
)
plot_category_unique = pn.Column(
bar_plot_pane_category_unique,
pn.Spacer(height=30),
text_below_percentage_product_category_UNIQUE
)
plot_status_unique = pn.Column(
bar_plot_pane_status_unique,
pn.Spacer(height=30),
text_below_percentage_production_status_UNIQUE
)
###############################################
# Update 09/16
# Create a vertical divider with custom CSS
###############################################
vertical_divider = pn.pane.HTML(
'<div style="width: 1px; height: 800px; background-color:#D9D9D9;"></div>',
)
vertical_divider2 = pn.pane.HTML(
'<div style="width: 1px; height: 800px; background-color:#D9D9D9;"></div>',
)
###############################################################
# Production Status
##############################################################
# Combine plots and content into columns and rows
distribution_dashboard_production_status = pn.Row(
pn.Column(
pn.pane.HTML("<h2 style='font-size: 18px; color: black; text-align: center; font-weight: bold; padding-left: 15px;'> #1 Based on <u>Unique</u> Part Number in the backlog</h2>"),
pn.Row(plot3_layout, pn.Spacer(width=30), plot_status_unique) # Place plots in the same row
),
pn.Row(pn.Spacer(width=30), vertical_divider, pn.Spacer(width=30)),
pn.Column(
pn.pane.HTML("<h2 style='font-size: 18px; color: black; text-align: center; font-weight: bold; padding-left: 15px;'> #2 Based on <u>Total Quantity</u> of Top-Level in the backlog</h2>"),
plot_status_percentage
)
)
########################
# Product Category
########################
# Combine plots in another horizontal row
distribution_dashboard_product_category = pn.Row(
pn.Column(
pn.pane.HTML("<h2 style='font-size: 18px; color: black; text-align: center; font-weight: bold; padding-left: 15px;'> #1 Based on <u>Unique</u> Part Number in the backlog</h2>"),
pn.Row(plot1_layout, pn.Spacer(width=30), plot_category_unique) # Place plots in the same row
),
pn.Row(pn.Spacer(width=30), vertical_divider2, pn.Spacer(width=30)),
pn.Column(
pn.pane.HTML("<h2 style='font-size: 18px; color: black; text-align: center; font-weight: bold; padding-left: 15px;'> #2 Based on <u>Total Quantity</u> of Top-Level in the backlog</h2>"),
pn.Row(plot_category_percentage, pn.Spacer(width=30), plot2_layout) # Place plots in the same row
)
)
# Update 08/14
#//////////////////////////////////////////////////#//////////////////////////////////////////////////
###############################################################################################
# Create a bar chart graphs representing the % completion Critical Qty and % Completion Total
###############################################################################################
#//////////////////////////////////////////////////#//////////////////////////////////////////////////
# Include the Combined PN {Program} by using Pivot_table_completion_upated_combinedPN instead of Pivot_table_completion
# Filter out rows where '% Completion Total Qty' = 0 from Pivot_table_completion_upated_combinedPN
Pivot_table_completion_upated_combinedPN_filtered = Pivot_table_completion_upated_combinedPN[Pivot_table_completion_upated_combinedPN['% Completion Total Qty'] != 0]
# Create a key numbers pane (initially empty)
key_numbers_pane = pn.pane.DataFrame(pd.DataFrame(), width=700, index=False)
def update_data(event):
# Filter out rows where '% Completion Total Qty' = 0
filtered_data = Pivot_table_completion_upated_combinedPN[Pivot_table_completion_upated_combinedPN['% Completion Total Qty'] != 0]
# Define the target value for 'Pty Indice'
target_value = f'Combined PN {program_widget_historic.value}'
# Filter the DataFrame based on the target value
filtered_data_target_value = filtered_data[filtered_data['Pty Indice'] == target_value]
# Extract the desired columns
key_numbers = filtered_data_target_value[['Pty Indice', 'Qty Shipped', 'Critical Qty', 'Total Quantity', '% Completion Critical Qty', '% Completion Total Qty']]
# Update the key numbers display
key_numbers_pane.object = key_numbers # Update the key numbers pane
# Attach the callback to the widget
program_widget_historic.param.watch(update_data, 'value')
#########################################################################################################################
def customize_completion_plot(bokeh_plot):
"""Apply customizations to the % Completion plot."""
bokeh_plot.xaxis.major_label_text_font_size = '8pt'
bokeh_plot.yaxis.major_label_text_font_size = '10pt'
bokeh_plot.title.text_font_size = '12pt'
bokeh_plot.title.text_color = "#305496"
bokeh_plot.xaxis.axis_line_width = 2
bokeh_plot.yaxis.axis_line_width = 2
bokeh_plot.xaxis.major_label_orientation = 'vertical'
bokeh_plot.yaxis.major_label_orientation = 'horizontal'
bokeh_plot.yaxis.axis_label_text_font_size = '10pt'
bokeh_plot.ygrid.grid_line_color = '#F0F0F0'
bokeh_plot.ygrid.grid_line_dash = [4, 6]
bokeh_plot.toolbar.logo = None
# Cap the y-axis at 100%
bokeh_plot.y_range.end = 100
return bokeh_plot
def create_completion_plot(Pivot_table_completion_upated_combinedPN_filtered, default_program_historic):
# Filter data by default program
filtered_data = Pivot_table_completion_upated_combinedPN_filtered[Pivot_table_completion_upated_combinedPN_filtered['Program'] == default_program_historic]
if filtered_data.empty:
print("No data found for the default program.")
return None, None
# Filter rows where either % Completion Critical Qty or % Completion Total Qty is greater than 0
filtered_data = filtered_data[
(filtered_data['% Completion Critical Qty'] > 0) |
(filtered_data['% Completion Total Qty'] > 0)
]
# Melt the DataFrame to long format for plotting
melted_df = filtered_data.melt(
id_vars=['Pty Indice'],
value_vars=['% Completion Critical Qty', '% Completion Total Qty'],
var_name='Completion Type',
value_name='Completion Percentage'
)
# Create a bar chart
completion_plot = melted_df.hvplot.bar(
x='Pty Indice',
y='Completion Percentage',
color='Completion Type',
title="% Completion Critical Qty and % Completion Total Qty per Pty Indice",
xlabel='Pty Indice',
ylabel='% Completion',
cmap='Category20',
legend='top_left',
height=400,
tools=[]
)
# Render and customize the plot
bokeh_completion_plot = hv.render(completion_plot, backend='bokeh')
bokeh_completion_plot = customize_completion_plot(bokeh_completion_plot)
# Remove existing HoverTools (if any) before adding a new one
bokeh_completion_plot.tools = [tool for tool in bokeh_completion_plot.tools if not isinstance(tool, HoverTool)]
# Add HoverTool with custom formatting
hover = HoverTool()
hover.tooltips = [
("Pty Indice", "@Pty_Indice"),
("Completion Type", "@color"),
("Percentage", "@Completion_Percentage%") #"@value{0.1f}%") # Round to 1 decimal
]
bokeh_completion_plot.add_tools(hover)
# Remove wheel zoom from active tools if you want it inactive by default - 08/12
bokeh_completion_plot.tools = [tool for tool in bokeh_completion_plot.tools if not isinstance(tool, WheelZoomTool)]
return bokeh_completion_plot
def update_bar_chart(event):
# Get the selected program from the widget
program = program_widget_historic.value
#print(f"Updating plots for program: {program}")
# Filter data by the selected program
filtered_data = Pivot_table_completion_upated_combinedPN_filtered[Pivot_table_completion_upated_combinedPN_filtered['Program'] == program]
if filtered_data.empty:
print("No data found for the selected program.")
return
# Further filter rows where either % Completion Critical Qty or % Completion Total Qty is greater than 0
filtered_data = filtered_data[
(filtered_data['% Completion Critical Qty'] > 0) |
(filtered_data['% Completion Total Qty'] > 0)
]
# Melt the filtered DataFrame for plotting
melted_df = filtered_data.melt(
id_vars=['Pty Indice'],
value_vars=['% Completion Critical Qty', '% Completion Total Qty'],
var_name='Completion Type',
value_name='Completion Percentage'
)
# Update plots
bokeh_completion_plot = create_completion_plot(filtered_data, program)
# Update the plots in the Panel layout
plot_pane_completion.object = bokeh_completion_plot
# Create initial plot
bokeh_completion_plot = create_completion_plot(Pivot_table_completion_upated_combinedPN_filtered, default_program_historic)
# Convert to Panel/Bokeh
plot_pane_completion = pn.pane.Bokeh(bokeh_completion_plot, sizing_mode='stretch_width')
# Update plot initially - Needed for the sizing_mode='stretch_width' to be set
update_bar_chart(None)
#######################################################
# Watch the widget and update the plot on change
program_widget_historic.param.watch(update_bar_chart, 'value') # moved below 10/25
######################################
# Create text below graphs
########################################
text_below_completion_plot = (
f"<div style='width: 860px;'>"
f"This graph is based on data from |Snapshot| & |CM-Priority|- <b> {file_date} & {Date_CM_Priority}</b>:<br>"
"▷<b>Total Quantity</b>: Is calculated as 'IDD Backlog Qty' + 'Qty Shipped' if 'Remain. crit. Qty' = 0.<br>"
"➥ 'Total Quantity' increases over time as follow-up orders are placed, while the 'Critical Quantity' is defined as part of the project scope.<br>"
"▷ <b>% Completion Critical Qty</b>: Progress based on the defined 'Critical Qty', usually encompassing the DPAS orders.<br>"
"▷ <b>% Completion Total Qty</b>: Progress based on the 'Total Qty' including the potential follow-up orders.<br>"
"</div>"
)
text_above_key_number = (
f"▷ The Data-point <b>'Combined PN'</b>: Represents the full scope of the project for the selected 'Program'.<br>"
"➥ This data-point is a made-up PN representative of the entire scope in term of ' Total Quanyity' of the project based on the Priority List.<br>"
"➥ The Canceled orders are filtered-out but the PN 'To be transferred' are still included.<br>"
)
# Arrange text_above_key_number above key_numbers_pane
key_numbers_column = pn.Column(
text_above_key_number,
key_numbers_pane,
sizing_mode='stretch_width'
)
# Create a short vertical divider
vertical_divider_short = pn.pane.HTML(
'<div style="width: 1px; height: 170px; background-color:#D9D9D9;"></div>',
)
# Arrange text_below_completion_plot and key_numbers_column side-by-side
side_by_side = pn.Row(
text_below_completion_plot,
pn.Spacer(width=100), # Add space between the plot and the text
vertical_divider_short,
pn.Spacer(width=100),
key_numbers_column,
sizing_mode='stretch_width'
)
# Arrange plot_pane_completion on top and side_by_side below it
completion_dashboard = pn.Column(
plot_pane_completion,
pn.Spacer(height=50), # Add space between the plot and the text
side_by_side,
sizing_mode='stretch_width'
)
#08/21
#//////////////////////////////////////////////////#//////////////////////////////////////////////////
#######################################################################################################################
# Backlog Projection --> Quantity of PN to build per Month - Graph combined and Graph PN by PN
#######################################################################################################################
# Re-load df_Backlog to erase any potential change to the original dataframe
#df_Backlog = pd.read_excel(input_file_formatted, sheet_name='CM-Backlog', index_col=False)
#Rename 'Backlog row Qty' to 'Backlog Qty'
#df_Backlog.rename(columns={'Backlog row Qty': 'Backlog Qty'}, inplace=True)
# 09/19 ---> to be updated with 'Requested Date' & 'Month Requested' <---
#//////////////////////////////////////////////////#//////////////////////////////////////////////////
#Preparation of dataframes backlog_monthly_summary based on df_Backlog
backlog_monthly_summary = df_Backlog.copy()
#Filter relevant column from df_Backlog
backlog_monthly_summary = backlog_monthly_summary[['Priority', 'Pty Indice', 'IDD Top Level', 'SEDA Top Level', 'Backlog Qty', 'Marge standard', 'Site', 'Order', 'Invoice name', 'Requested Date', 'Due Date','Actual amount -standard', 'Month', 'Month Requested', 'Product_Category', 'Complexity', 'Program']]
#print('backlog_monthly_summary')
#display(backlog_monthly_summary)
###############################################################
# backlog_monthly_summary dataframe
###############################################################
# Ensure 'Due Date' is in datetime format
backlog_monthly_summary['Due Date'] = pd.to_datetime(backlog_monthly_summary['Due Date'])
backlog_monthly_summary['Requested Date'] = pd.to_datetime(backlog_monthly_summary['Requested Date'])
# Rename columns
backlog_monthly_summary = backlog_monthly_summary.rename(columns={
'Actual amount -standard': 'Sales',
'Marge standard': 'IDD Marge Standard',
'Complexity': 'Average Complexity',
})
# Filter to exclude rows where 'Order' contains 'NC'
backlog_monthly_summary = backlog_monthly_summary[~backlog_monthly_summary['Order'].str.contains('NC', na=False)] # na=False keeps rows where 'Order' is missing
#####################################
# Sorting backlog_monthly_summary
######################################
# Function to check if a value is numeric
def is_numeric(val):
try:
int(val)
return True
except ValueError:
return False
# Separate numeric and non-numeric 'Priority' values
backlog_numeric_priority = backlog_monthly_summary[backlog_monthly_summary['Priority'].apply(is_numeric)]
backlog_non_numeric_priority = backlog_monthly_summary[~backlog_monthly_summary['Priority'].apply(is_numeric)]
# Convert 'Priority' values to integers for numeric priorities
backlog_numeric_priority['Priority'] = backlog_numeric_priority['Priority'].astype(int)
# Sort numeric priorities in ascending order
#backlog_numeric_priority = backlog_numeric_priority.sort_values(by='Priority', ascending=True) #Update 08/28
backlog_numeric_priority.sort_values(by=['Priority', 'Pty Indice'], inplace=True)
# Combine the DataFrames, placing numeric priorities first and non-numeric priorities at the end
backlog_monthly_summary_sorted = pd.concat([backlog_numeric_priority, backlog_non_numeric_priority])
# Reset index if needed
backlog_monthly_summary_sorted.reset_index(drop=True, inplace=True)
# Update the original DataFrame
backlog_monthly_summary = backlog_monthly_summary_sorted
#print('backlog_monthly_summary:')
#display(backlog_monthly_summary)
##########################
########################################################################
# Create dataframe for Graph 1 by grouping by 'Month' and 'Program'
#########################################################################
#Update 09/19
# 'Month' is related to the 'Due Date', which corresponds to the modified PO set to build a more sustainable backlog
# 'Month Requested' is related to the 'Requested Date', which corresponds to the original PO placed by SEDA
###########################
backlog_monthly_summary_combined = backlog_monthly_summary.groupby(['Month', 'Program']).agg({
'Backlog Qty': 'sum',
'Sales': 'sum',
'Pty Indice': lambda x: ', '.join(map(str, x)),
'IDD Top Level': lambda x: ', '.join(x),
'SEDA Top Level': lambda x: ', '.join(x),
'IDD Marge Standard': 'sum',
'Due Date': 'first', # Keep the 'Due Date' as the first date in each group
'Average Complexity': 'mean' # Calculate the average complexity
}).reset_index()
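# The aggregation above yields one row per (Month, Program) pair: quantities and amounts
# are summed, 'Pty Indice' / 'IDD Top Level' / 'SEDA Top Level' become comma-joined strings,
# and 'Average Complexity' is the mean complexity of the rows in each group.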
# Define a function to format numbers with 1 decimal digit if necessary
def format_complexity(value):
if pd.isna(value): # Handle NaN values
return value
elif value.is_integer():
return int(value) # Return as integer if value is an integer
else:
return round(value, 1) # Round to 1 decimal place otherwise
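# Example behaviour (values come from the 'Average Complexity' mean):
#   format_complexity(3.0)   -> 3     (whole numbers are shown as integers)
#   format_complexity(2.346) -> 2.3   (otherwise rounded to one decimal)
#   format_complexity(float('nan'))   -> NaN (missing values pass through unchanged)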
# Apply the formatting function to 'Complexity' column
backlog_monthly_summary_combined['Average Complexity'] = backlog_monthly_summary_combined['Average Complexity'].apply(format_complexity)
#Create 'Normalized Complexity'
backlog_monthly_summary_combined['Normalized Complexity'] = backlog_monthly_summary_combined['Average Complexity']*backlog_monthly_summary_combined['Backlog Qty']
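# Example: an 'Average Complexity' of 2.5 over a 'Backlog Qty' of 10 units gives a
# 'Normalized Complexity' of 25 for that month.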
#print('backlog_monthly_summary_combined')
#display(backlog_monthly_summary_combined)
###################################
# Fill NaN values appropriately
###################################
# Fill numeric columns with 0
Backlog_numeric_cols = backlog_monthly_summary_combined.select_dtypes(include='number').columns
backlog_monthly_summary_combined[Backlog_numeric_cols] = backlog_monthly_summary_combined[Backlog_numeric_cols].fillna(0)
# Fill string columns with ''
Backlog_string_cols = backlog_monthly_summary_combined.select_dtypes(include='object').columns
backlog_monthly_summary_combined[Backlog_string_cols] = backlog_monthly_summary_combined[Backlog_string_cols].fillna('')
# Sort by 'Due Date' in ascending order
backlog_monthly_summary_combined = backlog_monthly_summary_combined.sort_values(by='Due Date', ascending=True)
# Display the updated DataFrame
#print('backlog_monthly_summary_combined:')
#display(backlog_monthly_summary_combined)
##################################################################################################
# Backlog graph 1 - Combined Quantity of PN to build per months
##################################################################################################
##################################################################################################
# Backlog graph 2 - Quantity of PN to build per month for each given Pty Indice
####################################################################################################
##################################################################################################
# Backlog graph 3 - XXXX
####################################################################################################
##################################################################################################
# Backlog graph 4 - Backlog Projection using 'Month Requested' and Requested Date'
####################################################################################################
# Load backlog
df_Backlog_overview = df_Backlog.copy()
### update 08/23
# Custom color palette with alpha transparency
custom_palette_bkg = {
'Backlog Qty': '#cdbedd',
'Sales': '#63BE7B',
'IDD Marge Standard': '#E2EFDA',
'Normalized Complexity': 'rgba(255, 47, 47, 0.7)' # Alpha applied
}
def customize_qty_backlog_plot(bokeh_plot):
""" Apply customizations to the Quantity backlog plot. """
bokeh_plot.xaxis.major_label_text_font_size = '8pt'
bokeh_plot.yaxis.major_label_text_font_size = '10pt'
bokeh_plot.title.text_font_size = '12pt'
bokeh_plot.title.text_color = "#305496"
bokeh_plot.xaxis.axis_line_width = 2
bokeh_plot.yaxis.axis_line_width = 2
bokeh_plot.xaxis.major_label_orientation = 'vertical'
bokeh_plot.yaxis.major_label_orientation = 'horizontal'
bokeh_plot.yaxis.axis_label_text_font_size = '10pt'
bokeh_plot.ygrid.grid_line_color = '#E0E0E0'
bokeh_plot.ygrid.grid_line_dash = [4, 6]
bokeh_plot.toolbar.logo = None
return bokeh_plot
def customize_total_quantity_backlog_plot(bokeh_plot):
""" Apply customizations to the Total Quantity plot. """
bokeh_plot.xaxis.major_label_text_font_size = '6pt'
bokeh_plot.yaxis.major_label_text_font_size = '10pt'
bokeh_plot.title.text_font_size = '12pt'
bokeh_plot.title.text_color = "#305496"
bokeh_plot.xaxis.axis_line_width = 2
bokeh_plot.yaxis.axis_line_width = 2
bokeh_plot.xaxis.major_label_orientation = 'vertical'
bokeh_plot.yaxis.major_label_orientation = 'horizontal'
bokeh_plot.yaxis.axis_label_text_font_size = '10pt'
bokeh_plot.ygrid.grid_line_color = '#E0E0E0'
bokeh_plot.ygrid.grid_line_dash = [4, 6]
bokeh_plot.toolbar.logo = None
return bokeh_plot
def customize_combined_backlog_plot(bokeh_plot):
""" Apply customizations to the Combined plot. """
bokeh_plot.xaxis.major_label_text_font_size = '8pt'
bokeh_plot.yaxis.major_label_text_font_size = '10pt'
bokeh_plot.title.text_font_size = '12pt'
bokeh_plot.title.text_color = "#305496"
bokeh_plot.xaxis.axis_line_width = 2
bokeh_plot.yaxis.axis_line_width = 2
bokeh_plot.xaxis.major_label_orientation = 'vertical'
bokeh_plot.yaxis.major_label_orientation = 'horizontal'
bokeh_plot.yaxis.axis_label_text_font_size = '10pt'
bokeh_plot.ygrid.grid_line_color = '#F0F0F0'
bokeh_plot.ygrid.grid_line_dash = [4, 6]
bokeh_plot.toolbar.logo = None
# Format the y-axis ticks in thousands with a dollar sign
bokeh_plot.yaxis.formatter =CustomJSTickFormatter(code="""
return '$' + (tick / 1000).toFixed(0) + 'k';
""")
return bokeh_plot
def create_total_quantity_backlog_plot(df_Backlog_overview, default_program_historic):
# Filter data by the default program
filtered_data = df_Backlog_overview[df_Backlog_overview['Program'] == default_program_historic]
if filtered_data.empty:
print("No data found for the default program.")
return None
# Aggregate data: Sum 'Backlog Qty' for each 'Pty Indice'
aggregated_data = filtered_data.groupby('Pty Indice')['Backlog Qty'].sum().reset_index()
# If the program is 'Phase 4-5', sort by 'Priority'
if default_program_historic == 'Phase 4-5':
# Merge the aggregated data with original to retain 'Priority'
aggregated_data = pd.merge(aggregated_data, filtered_data[['Pty Indice', 'Priority']].drop_duplicates(), on='Pty Indice')
# Convert 'Priority' values to integers for sorting
aggregated_data['Priority'] = aggregated_data['Priority'].astype(int)
# Sort numeric priorities in ascending order - Update 08/28
#aggregated_data = aggregated_data.sort_values(by='Priority', ascending=True)
aggregated_data.sort_values(by=['Priority', 'Pty Indice'], inplace=True)
# Define the uniform color
uniform_color = '#cdbedd' # Light blue color
# Create the plot
total_quantity_plot = aggregated_data.hvplot.bar(
x='Pty Indice',
y='Backlog Qty',
title="Total Backlog Qty Monthly",
xlabel='Pty Indice',
ylabel='Total Backlog Qty',
#cmap=custom_palette_bkg,
color=uniform_color, # Apply the same color to all bars
legend='top_left',
height=400,
tools=[]
)
return total_quantity_plot
def create_backlog_chart_detailed (backlog_monthly_summary_combined, default_program_historic, df_Backlog_overview):
# Filter data by default program
filtered_data = backlog_monthly_summary_combined[backlog_monthly_summary_combined['Program'] == default_program_historic]
if filtered_data.empty:
print("No data found for the default program.")
return None, None, None
# Melt the DataFrame to include Normalized Complexity
melted_df = filtered_data.melt(id_vars=['Month'], value_vars=['Backlog Qty', 'Sales', 'IDD Marge Standard', 'Normalized Complexity'],
var_name='Quantity Type', value_name='Quantity Value')
# Create plot for 'Backlog Qty' and 'Normalized Complexity'
backlog_qty_plot = melted_df[melted_df['Quantity Type'].isin(['Backlog Qty', 'Normalized Complexity'])].hvplot.bar(
x='Month',
y='Quantity Value',
color='Quantity Type',
title="Monthly Backlog - Backlog Quantity & Normalized Complexity",
xlabel='Month',
ylabel='Backlog Qty & Normalized Complexity',
cmap=custom_palette_bkg,
legend='top_left',
height=400,
bar_width=0.6, # Set bar width - 09/12
tools=[]
)
bokeh_backlog_qty_plot = hv.render(backlog_qty_plot, backend='bokeh')
bokeh_backlog_qty_plot = customize_qty_backlog_plot(bokeh_backlog_qty_plot)
#####################################################
# Remove existing HoverTools (if any) before adding a new one
bokeh_backlog_qty_plot.tools = [tool for tool in bokeh_backlog_qty_plot.tools if not isinstance(tool, HoverTool)]
# Add HoverTool with custom formatting
hover = HoverTool()
hover.tooltips = [
("Month", "@Month"),
("KPI", "@color"),
("Value", "@Quantity_Value")
]
# Add HoverTool to the plot
bokeh_backlog_qty_plot.add_tools(hover)
# Remove wheel zoom from active tools if you want it inactive by default - 08/12
bokeh_backlog_qty_plot.tools = [tool for tool in bokeh_backlog_qty_plot.tools if not isinstance(tool, WheelZoomTool)]
############################################################
# Create combined plot for 'IDD Marge Standard' and 'Sales'
combined_backlog_plot = melted_df[melted_df['Quantity Type'].isin(['IDD Marge Standard', 'Sales'])].hvplot.bar(
x='Month',
y='Quantity Value',
color='Quantity Type',
title="Monthly Backlog - IDD Margin & Total Sales",
xlabel='Month',
ylabel='[K$]',
cmap=custom_palette_bkg,
legend='top_left',
stacked=True, # Stacking bars
height=400,
bar_width=0.6, # Set bar width - 09/12
tools=[]
)
bokeh_combined_backlog_plot = hv.render(combined_backlog_plot, backend='bokeh')
bokeh_combined_backlog_plot = customize_combined_backlog_plot(bokeh_combined_backlog_plot)
#New 08/08
#####################################################
# Remove existing HoverTools (if any) before adding a new one
bokeh_combined_backlog_plot.tools = [tool for tool in bokeh_combined_backlog_plot.tools if not isinstance(tool, HoverTool)]
# Add HoverTool with custom formatting
hover = HoverTool()
hover.tooltips = [
("Month", "@Month"),
("KPI", "@color"),
("Value", "@Quantity_Value{($0,0k)}") # Format values: thousands with 'K' # Quantity_Value with the '_' otherwise that does not work!
]
# Add HoverTool to the plot
bokeh_combined_backlog_plot.add_tools(hover)
# Remove wheel zoom from active tools if you want it inactive by default - 08/12
bokeh_combined_backlog_plot.tools = [tool for tool in bokeh_combined_backlog_plot.tools if not isinstance(tool, WheelZoomTool)]
############################################################
# Create Total Quantity backlog plot
total_quantity_backlog_plot = create_total_quantity_backlog_plot(df_Backlog_overview, default_program_historic)
if total_quantity_backlog_plot:
bokeh_total_quantity_backlog_plot = hv.render(total_quantity_backlog_plot, backend='bokeh')
bokeh_total_quantity_backlog_plot = customize_total_quantity_backlog_plot(bokeh_total_quantity_backlog_plot)
# Remove wheel zoom from active tools if you want it inactive by default - 08/12
bokeh_total_quantity_backlog_plot.tools = [tool for tool in bokeh_total_quantity_backlog_plot.tools if not isinstance(tool, WheelZoomTool)]
else:
bokeh_total_quantity_backlog_plot = None
return bokeh_backlog_qty_plot, bokeh_combined_backlog_plot, bokeh_total_quantity_backlog_plot
def update_backlog_chart_combined(event):
# Get the selected program from the widget
program = program_widget_historic.value
#print(f"Updating plots for program: {program}")
# Filter data by the selected program
filtered_data = backlog_monthly_summary_combined[backlog_monthly_summary_combined['Program'] == program]
if filtered_data.empty:
print("No data found for the selected program.")
return
# Melt the DataFrame
melted_df = filtered_data.melt(id_vars=['Month'], value_vars=['Backlog Qty', 'Sales', 'IDD Marge Standard', 'Normalized Complexity'],
var_name='Quantity Type', value_name='Quantity Value')
# Update plots
bokeh_backlog_qty_plot, bokeh_combined_backlog_plot, bokeh_total_quantity_backlog_plot = create_backlog_chart_detailed(filtered_data, program, df_Backlog_overview)
# Update the plots in the Panel layout
backlog_plot_pane1.object = bokeh_backlog_qty_plot
backlog_plot_pane2.object = bokeh_combined_backlog_plot
backlog_plot_pane3.object = bokeh_total_quantity_backlog_plot
# Create initial bokeh plots
bokeh_backlog_qty_plot, bokeh_combined_backlog_plot, bokeh_total_quantity_backlog_plot = create_backlog_chart_detailed(backlog_monthly_summary_combined, default_program_historic, df_Backlog_overview)
# Convert Bokeh plots to Panel
backlog_plot_pane1 = pn.pane.Bokeh(bokeh_backlog_qty_plot, sizing_mode='stretch_width')
backlog_plot_pane2 = pn.pane.Bokeh(bokeh_combined_backlog_plot, sizing_mode='stretch_width')
backlog_plot_pane3 = pn.pane.Bokeh(bokeh_total_quantity_backlog_plot, sizing_mode='stretch_width')
# Update plot initially - Needed for the sizing_mode='stretch_width' to be set
update_backlog_chart_combined(None)
#######################################################
# Watch the widget and update the plot on change
program_widget_historic.param.watch(update_backlog_chart_combined, 'value')
#//////////////////////////////////////
############################################################################################
# Display the dataframe monthly_summary with the list of Pty Indice for each Month under Graph 3
#############################################################################################
#///////////////////////////////////////
# Function to remove duplicates in comma-separated strings
def remove_duplicates_from_string(s):
items = s.split(', ')
unique_items = sorted(set(items), key=items.index) # Preserve order
return ', '.join(unique_items)
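# Example: remove_duplicates_from_string('A, B, A, C') -> 'A, B, C'
# (duplicates removed, first-occurrence order preserved)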
# Function to filter and sort DataFrame by program and month (for backlog)
def filter_dataframe_monthly_summary_backlog(program):
# Apply the filter based on selected program
filtered_df = backlog_monthly_summary_combined[backlog_monthly_summary_combined['Program'] == program]
# Check if the filtered DataFrame is empty
if filtered_df.empty:
print("No data found for the specified program.") # New check for empty DataFrame
return filtered_df # Return empty DataFrame if no matches found
# Filter columns
filtered_df = filtered_df[['Month', 'Pty Indice', 'Backlog Qty', 'IDD Top Level']]
# Remove duplicates in specified columns
filtered_df['Pty Indice'] = filtered_df['Pty Indice'].apply(remove_duplicates_from_string)
filtered_df['IDD Top Level'] = filtered_df['IDD Top Level'].apply(remove_duplicates_from_string)
# Create a temporary column for sorting by converting 'Month' to datetime
filtered_df['Month_dt'] = pd.to_datetime(filtered_df['Month'], format='%b %y', errors='coerce')
# Check for any invalid dates after conversion
if filtered_df['Month_dt'].isnull().any():
print("Some dates could not be parsed. Please check the 'Month' column for incorrect formats.") # Error handling
return filtered_df # Return DataFrame without sorting
# Sort by the new 'Month_dt' column
filtered_df = filtered_df.sort_values(by='Month_dt') # Keep ascending order so older months are displayed first
# Reset the index after sorting
filtered_df.reset_index(drop=True, inplace=True)
# Print the DataFrame before deleting the temporary column
#print("Filtered and sorted DataFrame before dropping the temporary column:")
#display(filtered_df) # Displaying the DataFrame for verification
# Drop the 'Month_dt' column
filtered_df = filtered_df.drop(columns=['Month_dt'])
return filtered_df
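# Note: 'Month' values are expected in '%b %y' form (e.g. 'Sep 24'); any value that does
# not parse becomes NaT and the DataFrame is returned unsorted in that case.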
#####################################################
# Table colored in purple and white every other rows
#######################################################
# Function to apply custom styles to the DataFrame (alternating row colors)
def style_dataframe_purple(df):
def row_styles(row):
# Alternate row colors based on row index
color = '#E4DFEC' if row.name % 2 == 0 else '#ffffff' # Alternate colors
return [f'background-color: {color}'] * len(row) # Apply to all columns
# Apply the style function to the DataFrame rows
styled_df = df.style.apply(row_styles, axis=1)
# Hide the index
styled_df.hide(axis="index") # Hide index
return styled_df
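# Note: the alternating colors rely on row.name being the positional index (0, 1, 2, ...),
# which holds here because filter_dataframe_monthly_summary_backlog resets the index before styling.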
# Function to update DataFrame display with custom styling
def update_dataframe_monthly_summary_backlog(program):
filtered_df = filter_dataframe_monthly_summary_backlog(program)
styled_df = style_dataframe_purple(filtered_df)
styled_html = styled_df.to_html() # New 10/24
# Add CSS for overflow handling directly in the HTML
html_with_overflow = f'<div style="overflow-y: auto; height: 450px;">{styled_html}</div>'
return html_with_overflow
# Initialize the backlog table
monthly_summary_backlog_table = pn.pane.HTML(update_dataframe_monthly_summary_backlog(default_program_historic), width=700)
# Callback function to update the table based on widget value
def update_table_backlog(event):
print(f"Widget value changed to: {event.new}") # Check new value
new_df = filter_dataframe_monthly_summary_backlog(event.new)
# Style the new DataFrame
styled_df = style_dataframe_purple(new_df)
# Convert styled DataFrame to HTML for rendering
styled_html = styled_df.to_html()
html_with_overflow = f'<div style="overflow-y: auto; height: 450px;">{styled_html}</div>'
# Update the object attribute directly
monthly_summary_backlog_table.object = html_with_overflow # Update existing HTML pane
# Attach callback to the widget
program_widget_historic.param.watch(update_table_backlog, 'value')
# Create a backlog high level summary table with 'Backlog Quantity' of PN and 'Sales' related to 'Total Past due backlog',
# 'Total future backlog', 'Total current year remaining backlog'
backlog_highlevelsummary_table = None
######################################
# Create text below graphs
########################################
# Convert 'Due Date' to datetime format
df_Backlog_overview['Due Date'] = pd.to_datetime(df_Backlog_overview['Due Date'])
text_below_graph_backlog_qty_plot = (
f"This graph is based on data from |CM-Backlog|:<br>"
"▷ <b>Backlog Qty</b>: Total quantity of Top-Level related to the selected program in IDD backlog <br>"
"➥ The backlog does not necessarily represent the Master Production Schedule (MPS) as manually entered by the Master Scheduler. <br>"
"▷ <b>Normalized Complexity</b>: Average complexity of the Top-Level in backlog normalized on the quantity of each PN on the period.<br>"
"▷ <b>The complexity is define as</b>: Kit, Subs = 0, Lighplate = 1, Rotottelite = 2, CPA = 3, ISP = 4.<br>"
)
text_below_graph_Marge_Sales_Backlog = (
f"This graph is based on data from |CM-Backlog|:<br>"
"▷ <b>Sales</b>: Sum of the 'Currency turnover ex.VAT' for the PN in backlog during the specified month<br>"
"▷ <b>IDD Marge Standard</b>: Sum of the 'IDD Margin Standard' for the PN in backlog during the specified month.<br>"
"➥ The value is displayed as: Gain (Loss). <br>"
)
text_below_graph_backlog_pty_indice = (
f"This graph is based on data from |CM-Backlog|:<br>"
"▷ <b>Total Backlog Qty </b>: Total quantity of Top-Level related to the selected pty Indice in IDD Backlog.<br>"
)
##############################################
# Combine plots into a vertical Panel layout
###############################################
# Combine the plots and table in the layout
# Create the dashboard layout for backlog overview
Backlogoverview_dashboard = pn.Column(
pn.Row(
# First Row with two columns
pn.Column(
backlog_plot_pane1, # First plot
text_below_graph_backlog_qty_plot # Text below first plot
),
pn.Spacer(width=50), # Spacer between columns
vertical_divider_medium2, # Vertical divider
pn.Spacer(width=50), # Spacer between columns
pn.Column(
backlog_plot_pane2, # Second plot
text_below_graph_Marge_Sales_Backlog # Text below second plot
),
sizing_mode='stretch_width' # Stretch columns to fit width
),
pn.Spacer(height=50), # Spacer before the next row
pn.Row(
# Second Row with another two columns
pn.Column(
backlog_plot_pane3, # Third plot
text_below_graph_backlog_pty_indice # Text below third plot
),
monthly_summary_backlog_table, # Summary table on the right
),
)
#//////////////////////////////////////////////////
#######################################################################################################################
# Final Layout of the |Project Overview| tab
#######################################################################################################################
#//////////////////////////////////////////////////
# Define your color
line_color = "#4472C4" # Change this to your desired color
font_top_color = "#4472C4"
subtitle_background_color = "#aee0d9" # "#F2F2F2" #Gray
# Convert end_date_historic to the desired format
formatted_end_date = end_date_historic.strftime("%m/%d/%Y")
# Use the formatted date in your string
Historic_title = f"Transfer Project Overview [{formatted_end_date}]"
Historic_subtitle = "Selection of the program"
Historic_subtitle2 = "Monthly shipments & related Sales"
Historic_subtitle3bis = "Distribution of product category"
Historic_subtitle3 = "Distribution of production status"
Historic_subtitle4 = "Percentage Completion of the project"
Historic_subtitle5 = "Backlog Overview"
Historic_subsubtitle1 = "Backlog high level summary"
###########################################
title_section = pn.pane.HTML(f"""
<div style='background-color: {font_top_color}; width: 100%; padding: 10px; box-sizing: border-box;'>
<h1 style='font-size: 24px; color: white; text-align: left; margin: 0;'>{Historic_title}</h1>
</div>
""", sizing_mode='stretch_width')
# Title Layout
title_layout = pn.Column(
title_section,
pn.layout.Divider(margin=(-10, 0, 0, 0)),
pn.Column(
#pn.pane.HTML(f"<h3 style='font-size: 12px; text-align: center; font-weight: normal;'>{Historic_subtitle}</h3>"),
pn.layout.Spacer(height=5),
pn.Row(
program_widget_historic,
sizing_mode='stretch_width'
),
sizing_mode='stretch_width'
),
sizing_mode='stretch_width'
)
# Define Secondary Layout
secondary_layout = pn.Column(
pn.pane.HTML(
f"""
<div style='background-color: {subtitle_background_color};
width: 100%;
padding: 10px;
box-sizing: border-box;
border-radius: 15px;'>
<h1 style='font-size: 22px; color: white; text-align: left; margin: 0;'>
{Historic_subtitle2}
</h1>
</div>
""",
sizing_mode='stretch_width'
),
pn.Spacer(height=50), # Spacer before plots
combined_plots_history,
pn.Spacer(height=50),
pn.layout.Divider(margin=(0, 0, -10, 0)),
# Percentage Completion of the project section
pn.pane.HTML(
f"""
<div style='background-color: {subtitle_background_color};
width: 100%;
padding: 10px;
box-sizing: border-box;
border-radius: 15px;'>
<h1 style='font-size: 22px; color: white; text-align: left; margin: 0;'>
{Historic_subtitle4}
</h1>
</div>
""",
sizing_mode='stretch_width'
),
pn.Spacer(height=50), # Spacer before plots
completion_dashboard,
pn.Spacer(height=50),
pn.layout.Divider(margin=(0, 0, -10, 0)),
# Backlog Overview section
pn.pane.HTML(
f"""
<div style='background-color: {subtitle_background_color};
width: 100%;
padding: 10px;
box-sizing: border-box;
border-radius: 15px;'>
<h1 style='font-size: 22px; color: white; text-align: left; margin: 0;'>
{Historic_subtitle5}
</h1>
</div>
""",
sizing_mode='stretch_width'
),
pn.Spacer(height=50), # Spacer before plots
Backlogoverview_dashboard,
pn.Spacer(height=50), # Spacer before plots
pn.layout.Divider(margin=(0, 0, -10, 0)),
# Distribution of product category section
pn.pane.HTML(
f"""
<div style='background-color: {subtitle_background_color};
width: 100%;
padding: 10px;
box-sizing: border-box;
border-radius: 15px;'>
<h1 style='font-size: 22px; color: white; text-align: left; margin: 0;'>
{Historic_subtitle3bis}
</h1>
</div>
""",
sizing_mode='stretch_width'
),
pn.Spacer(height=50), # Spacer before plots
distribution_dashboard_product_category,
pn.Spacer(height=50), # Spacer before plots
pn.layout.Divider(margin=(0, 0, -10, 0)),
# Distribution of production status section
pn.pane.HTML(
f"""
<div style='background-color: {subtitle_background_color};
width: 100%;
padding: 10px;
box-sizing: border-box;
border-radius: 15px;'>
<h1 style='font-size: 22px; color: white; text-align: left; margin: 0;'>
{Historic_subtitle3}
</h1>
</div>
""",
sizing_mode='stretch_width'
),
pn.Spacer(height=50), # Spacer before plots
distribution_dashboard_production_status,
pn.Spacer(height=50), # Spacer before plots
)
# Combine Title, Primary, and Secondary Layouts
historic_tab = pn.Column(
title_layout,
pn.layout.Divider(margin=(0, 0, -10, 0)), # Add some space between primary and secondary layouts if needed
secondary_layout,
pn.Spacer(height=50), # Spacer before plots
pn.layout.Divider(margin=(0, 0, -10, 0)), # Add some space between primary and secondary layouts if needed
sizing_mode='stretch_width' # Ensure the final layout stretches to fill available space
)
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#*****************************************************************************************************************************
##############################################################################################################################
# |Priority List|
##############################################################################################################################
#*****************************************************************************************************************************
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
# Reload df_Priority, as the in-memory copy has been filtered earlier in the code
df_Priority_table = pd.read_excel(input_file_formatted, sheet_name='CM-Priority', index_col=False)
#----------------------------------------------------------
# 02/11 - Change 'Phase 4' or 'Phase 5' with 'Phase 4-5'
#----------------------------------------------------------
# For df_Priority
if 'Program' in df_Priority_table.columns and 'Pty Indice' in df_Priority_table.columns:
mask = (
df_Priority_table['Program'].isin(['Phase 4', 'Phase 5']) &
~df_Priority_table['Pty Indice'].str.contains('Phase5', na=False)
)
df_Priority_table.loc[mask, 'Program'] = 'Phase 4-5'
#----------------------------------------------------------
######################
# Create DataFrame
######################
# Display a simplified CM-Priority to have access to the IDD Top-Level and SEDA Top-Level associated with the Pty Indice
List_priority = df_Priority_table[['Priority', 'Pty Indice', 'IDD Top Level', 'SEDA Top Level', 'Description', 'Critical Qty', 'Production Status', 'Program']]
# Include 'Product Category' within List_priority
List_priority['Product Category'] = List_priority['Description'].apply(determine_category)
# Replace 0 with 'Not yet assigned' in ['IDD Top Level']
List_priority['IDD Top Level'] = List_priority['IDD Top Level'].replace(0, 'Not yet assigned')
#################################################################################################################
# Widgets initialization
################################################################################################################
# Default program
default_program_List = 'Phase 4-5'
# Widgets initialization
unique_programs_List = df_Priority_table['Program'].dropna().unique().tolist()
default_program_List = default_program_List if default_program_List in unique_programs_List else unique_programs_List[0] # Use the first available program if the default is not in the list
program_widget_List = pn.widgets.Select(name='Select Program', options=unique_programs_List, value=default_program_List)
###################################################################
# Function to update the DataFrame based on the selected program
####################################################################
#######################
# Define color mappings
#########################
color_mapping_production_status = {
'Industrialized': '#D8E4BC', # Light green
'FTB WIP': '#DAEEF3', # Light Blue
'Proto WIP': '#DAEEF3', # Light Blue
'Completed': '#75B44A', # Dark green '#75B44A' (alternative: gray fill '#F2F2F2')
'To be transferred': '#F2DCDB', # Light red
'Officially transferred':'#FF7A5B',
'Canceled': '#F35757' # Dark red
}
font_mapping_production_status = {
'Industrialized': '#375623', # Dark green
'FTB WIP': '#0070C0', # Dark Blue
'Proto WIP': '#0070C0', # Dark Blue
'Completed': '#375623', # Dark green
'To be transferred': '#C00000', # Dark red
'Officially transferred':'#C00000',
'Canceled': '#C00000' # Dark red
}
# Define border and alignment styles
light_gray_border = 'border: 1px solid #D3D3D3;'
centered_text = 'text-align: center;'
def apply_color_and_bold(row):
"""
Apply color and bold formatting to a row based on 'Production Status' and 'Pty Indice' values.
"""
styles = [''] * len(row)
font_colors = [''] * len(row)
production_status = row.get('Production Status', '')
pty_indice = row.get('Pty Indice', '')
# Determine the background color based on 'Production Status'
if production_status in color_mapping_production_status:
color = color_mapping_production_status[production_status]
else:
color = '#FFFFFF' # Default to white if status is not in the mapping
# Determine the font color based on 'Production Status'
if production_status in font_mapping_production_status:
font_color = f'color: {font_mapping_production_status[production_status]};'
else:
font_color = '' # Default to no color if status is not in the mapping
# Apply background color and border to each cell
for i in range(len(row)):
styles[i] = f'background-color: {color}; {light_gray_border}; {centered_text}'
if font_colors[i]: # Apply font color if it's set
styles[i] += f'; {font_colors[i]}'
# Add font color specifically to 'Production Status' and 'Pty Indice' cells
if 'Production Status' in row.index:
production_status_index = row.index.get_loc('Production Status')
styles[production_status_index] += f'; {font_color}; font-weight: bold;'
if 'Pty Indice' in row.index:
pty_indice_index = row.index.get_loc('Pty Indice')
styles[pty_indice_index] += f'; {font_color}; font-weight: bold;'
return styles
def format_priority_with_colors(df):
"""
Format DataFrame with colors and alignment.
"""
# Define header styling to center the text
header_style = {
'selector': 'thead th',
'props': [('text-align', 'center')]
}
# Apply color formatting and header styling
return df.style \
.apply(lambda row: apply_color_and_bold(row), axis=1) \
.set_table_styles([header_style]) \
.hide(axis="index")
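# Usage sketch: format_priority_with_colors() returns a pandas Styler, so it can be passed
# directly to a pn.pane.DataFrame, e.g.
#   pn.pane.DataFrame(format_priority_with_colors(List_priority.head()), index=False)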
##################
#Create panel pane
#################
# Function to update the DataFrame based on the selected program
def update_priority_table(event):
selected_program = event.new # Use event.new to get the new value
filtered_df = List_priority[List_priority['Program'] == selected_program].drop(columns=['Program'])
# Format the DataFrame only once
priority_table_pane.object = format_priority_with_colors(filtered_df)
# Initial display
initial_filtered_df = List_priority[List_priority['Program'] == default_program_List].drop(columns=['Program'])
priority_table_pane = pn.pane.DataFrame(format_priority_with_colors(initial_filtered_df), width=1000, index=False)
# Attach the callback to the program_widget_List
program_widget_List.param.watch(update_priority_table, 'value')
#################################
# Layout
#################################
Priority_title = "Priority List"
text_above_Priority = (
f"This table is based on data from |CM-Priority| - <b>{Date_CM_Priority}</b>:<br>"
"▷ <b>Priority List</b>: This table represents the total scope of the Transfer Project for the selected 'Program'.<br>"
"➥ It includes all PNs related to the project, regardless of whether they still have an existing IDD Backlog or if the 'Critical Quantity' defined as part of the transfer project has been reached.<br>"
"➥ Some PNs may not yet have an assigned IDD PN under 'IDD Top-Level'. In such cases, the BOM does not exist, and the given PN won't be present in the |Snapshot| table.<br>"
"➥ The color formatting is based on [<b>'Production Status'</b>].<br>"
)
# Create the title section for the Priority Tab
priority_title_section = pn.pane.HTML(f"""
<div style='background-color: {font_top_color}; width: 100%; padding: 10px; box-sizing: border-box;'>
<h1 style='font-size: 24px; color: white; text-align: left; margin: 0;'>{Priority_title}</h1>
</div>
""", sizing_mode='stretch_width')
# Create the layout for Priority Tab
priority_tab = pn.Column(
priority_title_section,
pn.layout.Divider(margin=(-10, 0, 0, 0)),
pn.Row(program_widget_List, sizing_mode='stretch_width'),
pn.Spacer(height=5),
text_above_Priority,
pn.Spacer(height=5),
priority_table_pane,
sizing_mode='stretch_width'
)
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#*****************************************************************************************************************************
##############################################################################################################################
# |Snapshot|
##############################################################################################################################
#*****************************************************************************************************************************
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
# Reload df_Snapshot_table from the Excel file because df_Snapshot was filtered earlier in the code
df_Snapshot_table = pd.read_excel(input_file_formatted, sheet_name='Snapshot', index_col=False)
#----------------------------------------------------------
# 02/11 - Replace 'Phase 4' or 'Phase 5' with 'Phase 4-5'
#----------------------------------------------------------
# For df_Snapshot (new addition)
if 'Program' in df_Snapshot_table.columns and 'Pty Indice' in df_Snapshot_table.columns:
mask = (
df_Snapshot_table['Program'].isin(['Phase 4', 'Phase 5']) &
~df_Snapshot_table['Pty Indice'].str.contains('Phase5', na=False)
)
df_Snapshot_table.loc[mask, 'Program'] = 'Phase 4-5'
#----------------------------------------------------------
#Map 'Total Qty' from Pivot_table_completion['Total Quantity'] on 'Pty Indice'
# Create the mapping from 'Pty Indice' to 'Total Quantity'
mapping_total_qty = Pivot_table_completion.set_index('Pty Indice')['Total Quantity']
# Apply the mapping to df_Snapshot based on 'Pty Indice'
df_Snapshot_table['Total Quantity'] = df_Snapshot_table['Pty Indice'].map(mapping_total_qty)
# If df_Snapshot['Total Quantity'] is NaN, the PN is not in Pivot_table_completion, meaning IDD never shipped it --> replace NaN with 'Critical Qty' or 'IDD Backlog Qty', whichever is larger
# Replace NaN values in 'Total Quantity' with the maximum of 'Critical Qty' or 'IDD Backlog Qty'
df_Snapshot_table['Total Quantity'] = df_Snapshot_table.apply(
lambda row: max(row['Critical Qty'], row['IDD Backlog Qty']) if pd.isna(row['Total Quantity']) else row['Total Quantity'],
axis=1
)
# Drop some columns for better visibility
df_Snapshot_table = df_Snapshot_table.drop(columns=['IDD Top Level', 'SEDA Top Level', 'Engineering Cost'])
#################################################################################################################
# Widgets initialization
################################################################################################################
# Widgets initialization
unique_programs_snapshot = df_Priority_table['Program'].dropna().unique().tolist()
# Default program: fall back to the first available program if 'Phase 4-5' is not in the snapshot
default_program_snapshot = 'Phase 4-5' if 'Phase 4-5' in unique_programs_snapshot else unique_programs_snapshot[0]
program_widget_snapshot = pn.widgets.Select(name='Select Program', options=unique_programs_snapshot, value=default_program_snapshot)
############################
# Formatting snapshot table
###########################
# Replace NaN with 0
df_Snapshot_table = df_Snapshot_table.fillna(0)
#print(df_Snapshot_table[['Max Expected Time (full ASSY)[hour]', 'Avg Actual Time (full ASSY)[hour]']].dtypes)
# Formatting columns
df_Snapshot_table['Shipped'] = df_Snapshot_table['Shipped'].astype(int)
df_Snapshot_table['Remain. crit. Qty'] = df_Snapshot_table['Remain. crit. Qty'].round().astype(int)
df_Snapshot_table['IDD Marge Standard (unit)'] = df_Snapshot_table['IDD Marge Standard (unit)'].map('${:,.1f}'.format)
df_Snapshot_table['IDD Sale Price'] = df_Snapshot_table['IDD Sale Price'].map('${:,.1f}'.format)
df_Snapshot_table['IDD Production Cost (unit)'] = df_Snapshot_table['IDD Production Cost (unit)'].map('${:,.1f}'.format)
df_Snapshot_table['Critical Qty'] = df_Snapshot_table['Critical Qty'].astype(int)
df_Snapshot_table['Qty WIP'] = df_Snapshot_table['Qty WIP'].astype(int)
df_Snapshot_table['Total Quantity'] = df_Snapshot_table['Total Quantity'].astype(int)
df_Snapshot_table['Total WO Count'] = df_Snapshot_table['Total WO Count'].astype(int)
df_Snapshot_table['Max Expected Time (full ASSY)[hour]'] = df_Snapshot_table['Max Expected Time (full ASSY)[hour]'].apply(lambda x: 'No Data' if x == 0 else '{:.2f}'.format(x))
df_Snapshot_table['Avg Actual Time (full ASSY)[hour]'] = df_Snapshot_table['Avg Actual Time (full ASSY)[hour]'].apply(lambda x: 'No Data' if x == 0 else '{:.2f}'.format(x))
df_Snapshot_table['Max Standard Deviation [hour]'] = df_Snapshot_table['Max Standard Deviation [hour]'].apply(lambda x: 'No Data' if x == 0 else '{:.2f}'.format(x))
# Column name update in df_Snapshot
# Remove the percentage sign and convert to numeric
df_Snapshot_table['Actual vs Standard time [%]'] = pd.to_numeric(df_Snapshot_table['Actual vs Standard time [%]'].str.rstrip('%'), errors='coerce')
# Define a function to format the values or replace NaN with 'N/A'
def format_percentage(value):
if pd.isna(value):
return 'N/A'
return '{:.0f}%'.format(value)
# Apply the function to the column
df_Snapshot_table['Actual vs Standard time [%]'] = df_Snapshot_table['Actual vs Standard time [%]'].apply(format_percentage)
# Replace 0 with ''
df_Snapshot_table['Start date target'] = df_Snapshot_table['Start date target'].replace(0, '')
###################################################################
# Function to update the DataFrame based on the selected program
##################################################################
#######################
# Define color mappings
#########################
color_mapping_production_status = {
'Industrialized': '#D8E4BC', # Light Green
'FTB WIP': '#DAEEF3', # Light Blue
'Proto WIP': '#DAEEF3', # Light Blue
'Completed': '#75B44A', # Dark green (alternative: gray fill '#F2F2F2')
'To be transferred': '#F2DCDB', # Light red
'Officially transferred':'#FF7A5B',
'Canceled': '#F35757' # Dark red
}
color_mapping_top_level_status = {
'Clear-to-Build': '#C6EFCE', # Light Green fill for 'Clear-to-Build'
'Short': '#FFC7CE', # Light Red fill for 'Short'
'Completed - No Backlog': '#6FAC46' # Medium Dark green in hex
}
font_mapping_top_level_status = {
'Clear-to-Build': '#4D7731', # Dark Green font for 'Clear-to-Build'
'Short': '#C00000', # Dark Red font for 'Short'
'Completed - No Backlog':'#548235' # Dark green in hex
}
font_mapping_production_status = {
'Industrialized': '#375623', # Dark green
'FTB WIP': '#0070C0', # Dark Blue
'Proto WIP': '#0070C0', # Dark Blue
'Completed': '#375623', # Dark green
'To be transferred': '#C00000', # Dark red
'Officially transferred':'#C00000',
'Canceled': '#C00000' # Dark red
}
def apply_color(row):
# Initialize color list with default (empty) colors
colors = [''] * len(row)
font_colors = [''] * len(row)
# Apply color based on 'Top-Level Status'
top_level_status = row.get('Top-Level Status', '')
if top_level_status in color_mapping_top_level_status:
colors[row.index.get_loc('Top-Level Status')] = color_mapping_top_level_status[top_level_status]
if top_level_status in font_mapping_top_level_status:
font_colors[row.index.get_loc('Top-Level Status')] = f'color: {font_mapping_top_level_status[top_level_status]};'
# Apply color for the rest of the row based on 'Production Status'
production_status = row.get('Production Status', '')
if production_status in color_mapping_production_status:
color = color_mapping_production_status[production_status]
for idx, value in enumerate(row):
if row.index[idx] != 'Top-Level Status':
colors[idx] = color
# Apply font color for 'Pty Indice' and 'Production Status' based on 'Production Status'
if production_status in font_mapping_production_status:
font_color = f'color: {font_mapping_production_status[production_status]};'
if 'Production Status' in row.index:
production_status_index = row.index.get_loc('Production Status')
font_colors[production_status_index] = font_color
if 'Pty Indice' in row.index:
pty_indice_index = row.index.get_loc('Pty Indice')
font_colors[pty_indice_index] = font_color
# Apply border, background color, and center text alignment to each cell
cell_styles = [f'background-color: {color}; {light_gray_border}; {centered_text}' for color in colors]
# Add font color to the cells as needed
for idx, font_color in enumerate(font_colors):
if font_color:
cell_styles[idx] += f'; {font_color}'
# Add bold formatting to the 'Production Status' and 'Pty Indice' cells
if 'Production Status' in row.index:
production_status_index = row.index.get_loc('Production Status')
cell_styles[production_status_index] += '; font-weight: bold;'  # leading '; ' keeps the CSS declarations separated
if 'Pty Indice' in row.index:
pty_indice_index = row.index.get_loc('Pty Indice')
cell_styles[pty_indice_index] += '; font-weight: bold;'  # leading '; ' keeps the CSS declarations separated
return cell_styles
# Apply color formatting and header centered
def format_snapshot_with_colors(df):
# Define header styling
header_style = {
'selector': 'thead th',
'props': [('text-align', 'center')]
}
# Apply color formatting and header styling
return df.style \
.apply(apply_color, axis=1) \
.set_table_styles([header_style]) \
.hide(axis="index")
##################
#Create panel pane
#################
# Update function
def update_snapshot_table(event):
selected_program = program_widget_snapshot.value
df_filtered = df_Snapshot_table[df_Snapshot_table['Program'] == selected_program].drop(columns=['Priority', 'Program', 'Description'])
styled_df_snapshot = format_snapshot_with_colors(df_filtered)
snapshot_table_pane.object = styled_df_snapshot
# Create the initial styled DataFrame pane
df_initial_filtered = df_Snapshot_table[df_Snapshot_table['Program'] == default_program_snapshot].drop(columns=['Priority', 'Program', 'Description'])
styled_df_snapshot = format_snapshot_with_colors(df_initial_filtered)
snapshot_table_pane = pn.pane.DataFrame(styled_df_snapshot)
# Attach the callback to program_widget_snapshot
program_widget_snapshot.param.watch(update_snapshot_table, 'value')
#################################
# Layout
##################################
Snapshot_title = "Snapshot"
text_above_snapshot = (
f"This table is based on data from |Snapshot| - <b>{file_date}</b>:<br>"
"▷ <b>Snapshot table</b>: This table represents the remaining scope of the Transfer Project for the selected 'Program'.<br>"
"➥ It includes all PNs that have an existing IDD Backlog or for which the 'Critical Quantity', defined as part of the transfer project, has not yet been reached. This applies even if the PN is not currently listed in the IDD Backlog.<br>"
"➥ Some PNs may not yet have an assigned IDD PN under 'IDD Top-Level'. In such cases, the BOM does not exist, and the given PN won't be present in this table.<br>"
"➥ The color formatting is based on ['Top-Level Status'] & ['Production Status'].<br>"
)
# Create the title section for the Snapshot Tab
Snapshot_title_section = pn.pane.HTML(f"""
<div style='background-color: {font_top_color}; width: 100%; padding: 10px; box-sizing: border-box;'>
<h1 style='font-size: 24px; color: white; text-align: left; margin: 0;'>{Snapshot_title}</h1>
</div>
""", sizing_mode='stretch_width')
# Create the layout for the Snapshot Tab
Snapshot_tab = pn.Column(
Snapshot_title_section,
pn.layout.Divider(margin=(-10, 0, 0, 0)),
pn.Row(program_widget_snapshot, sizing_mode='stretch_width'),
pn.Spacer(height=5),
text_above_snapshot,
pn.Spacer(height=5),
snapshot_table_pane,
sizing_mode='stretch_width'
)
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#*****************************************************************************************************************************
##############################################################################################################################
# |Cover Dashboard|
##############################################################################################################################
#*****************************************************************************************************************************
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
# Loading dataframe used for the |Cover Dashboard|
####################################################
# Function to assign a 'Product Category' to each 'Pty Indice' based on its 'Description'
def determine_category(description):
if not isinstance(description, str):
return 'Others'
if description == 'Rototellite':
return 'Rototellite'
elif 'Indicator' in description or 'CPA' in description:
return 'CPA'
elif 'Lightplate' in description:
return 'Lightplate'
elif 'ISP' in description or 'Keyboard' in description:
return 'ISP'
elif 'Module' in description:
return 'CPA'
elif 'optics' in description:
return 'Fiber Optics'
else:
return 'Others'
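# Quick sanity check of the mapping on a few made-up, illustrative descriptions
# (hedged examples only; they are not taken from the real data):
assert determine_category('CPA Indicator Assembly') == 'CPA'
assert determine_category('Fiber optics harness') == 'Fiber Optics'
assert determine_category(None) == 'Others'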
################################################################################################
# Sales and shipment progress using panel indicators 'Number', 'Progress' and 'Trend'
################################################################################################
df_Historic_dashboard = pd.read_excel(input_file_formatted, sheet_name='CM-Historic', index_col=False)
df_Priority_dashboard = pd.read_excel(input_file_formatted, sheet_name='CM-Priority', index_col=False)
df_Backlog_dashboard = df_Backlog.copy()
#----------------------------------------------------------
# 02/11 - Replace 'Phase 4' or 'Phase 5' with 'Phase 4-5'
#----------------------------------------------------------
# For df_Priority
if 'Program' in df_Priority_dashboard.columns and 'Pty Indice' in df_Priority_dashboard.columns:
mask = (
df_Priority_dashboard['Program'].isin(['Phase 4', 'Phase 5']) &
~df_Priority_dashboard['Pty Indice'].str.contains('Phase5', na=False)
)
df_Priority_dashboard.loc[mask, 'Program'] = 'Phase 4-5'
# For df_Historic
if 'Program' in df_Historic_dashboard.columns and 'Pty Indice' in df_Historic_dashboard.columns:
mask = (
df_Historic_dashboard['Program'].isin(['Phase 4', 'Phase 5']) &
~df_Historic_dashboard['Pty Indice'].str.contains('Phase5', na=False)
)
df_Historic_dashboard.loc[mask, 'Program'] = 'Phase 4-5'
#----------------------------------------------------------
# Create a 'Product Category' column from 'Description' so the category filter can be applied if needed
# df_Historic_dashboard already contains ['Product Category']
# df_Backlog_dashboard already contains ['Product_Category']
df_Priority_dashboard['Product Category'] = df_Priority_dashboard['Description'].apply(determine_category)
########################################################################################################################################################
######################
# df_Historic_dashboard
######################
# 'Standard amount USD' is not used in the code below: sales are calculated from 'Currency turnover ex.VAT'; 'Standard amount USD' is kept only to derive the margin if needed later on.
df_Historic_dashboard['Order'] = df_Historic_dashboard['Order'].astype(str)
df_Historic_dashboard = df_Historic_dashboard[~df_Historic_dashboard['Order'].str.contains('NC')]
df_Historic_dashboard = df_Historic_dashboard[['Pty Indice', 'Quantity', 'Invoice date', 'Order', 'Currency turnover ex.VAT', 'Standard amount USD', 'Program', 'IDD Marge Standard', 'Product Category']]
df_Historic_dashboard.rename(columns={'Quantity': 'Qty Shipped'}, inplace=True)
df_Historic_dashboard['Qty Shipped'] = df_Historic_dashboard['Qty Shipped'].astype(int)
df_Historic_dashboard.dropna(inplace=True)
df_Historic_dashboard['Invoice date'] = pd.to_datetime(df_Historic_dashboard['Invoice date'])
df_Historic_dashboard['Year'] = df_Historic_dashboard['Invoice date'].dt.year
df_Historic_dashboard['Month'] = df_Historic_dashboard['Invoice date'].dt.month
df_Historic_dashboard['Week'] = df_Historic_dashboard['Invoice date'].dt.isocalendar().week
#Rename 'Currency turnover ex.VAT' to 'Sales USD'
df_Historic_dashboard.rename(columns={'Currency turnover ex.VAT': 'Sales USD'}, inplace=True)
# Define the span_report_historic_dashboard
older_date = df_Historic_dashboard['Invoice date'].min()
recent_date = df_Historic_dashboard['Invoice date'].max()
span_report_historic_dashboard = (older_date, recent_date)
# Format the dates as short dates
older_date_str = older_date.strftime('%m/%d/%Y')
recent_date_str = recent_date.strftime('%m/%d/%Y')
# Create formatted title strings
span_report_historic_dashboard = f"{older_date_str} - {recent_date_str}"
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////
#################################################################################################################
# Widgets initialization and dataframe update
################################################################################################################
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////
# Unique programs list from the dataframe
unique_programs_List = df_Priority['Program'].dropna().unique().tolist()
# Check if 'Phase 4-5' is in unique_programs_List, else fall back to the first item.
default_program_List = 'Phase 4-5' if 'Phase 4-5' in unique_programs_List else unique_programs_List[0]
# Widget initialization
program_widget_List = pn.widgets.Select(name='Select Program', options=unique_programs_List, value=default_program_List)
###################################################################
# Update the DataFrame based on the selected program
###################################################################
# Function to update data based on the selected program
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////
# New 10/01
#######################################################################################################################################################
# Create a filter on 'Product Category' to exclude 'Lightplate' and 'Others' from the dashboard via two distinct toggle buttons, each with two positions: "Included (on)" / "Excluded (off)"
# Directly filter the dataframes df_Priority_dashboard, df_Historic_dashboard & df_Backlog_dashboard to drop the corresponding rows based on the button states
#######################################################################################################################################################
# Function to filter data based on the 'Lightplate' and 'Others' toggle buttons (a usage example follows the function below)
# Function to apply filters based on toggle status
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////
# Data Filtering Function
def filter_dashboard(df, include_lightplate, include_others, column_name):
"""Filter the dashboard DataFrame based on the include_lightplate and include_others flags."""
if not include_lightplate:
df = df[df[column_name] != 'Lightplate']
if not include_others:
df = df[df[column_name] != 'Others']
return df
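# Hedged usage example (illustrative call only; it mirrors the real column name):
#   filter_dashboard(df_Priority_dashboard, include_lightplate=False, include_others=False,
#                    column_name='Product Category')
# With both flags False, every 'Lightplate' and 'Others' row is dropped from the returned copy.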
# Function to apply filters based on the toggle buttons
# Note: the filtered copies computed here are for inspection only and are not assigned back to the globals; the filtering used for rendering is re-applied inside update_data()
def apply_filters(event):
global df_Historic_dashboard, df_Priority_dashboard, df_Backlog_dashboard
# Filter each dashboard with the correct column names
filtered_Historic = filter_dashboard(
df_Historic_dashboard, toggle_lightplate.value, toggle_others.value, 'Product Category'
)
filtered_Priority = filter_dashboard(
df_Priority_dashboard, toggle_lightplate.value, toggle_others.value, 'Product Category'
)
filtered_Backlog = filter_dashboard(
df_Backlog_dashboard, toggle_lightplate.value, toggle_others.value, 'Product_Category'
)
# Display filtered data shapes for debugging
#print("After Filtering:")
#print(f"Historic Dashboard Shape: {filtered_Historic.shape}")
#print(f"Priority Dashboard Shape: {filtered_Priority.shape}")
#print(f"Backlog Dashboard Shape: {filtered_Backlog.shape}")
# Attach this to the toggle events
def on_lightplate_toggle(event):
update_button_styles(toggle_lightplate)
apply_filters(event) # Pass the event argument here
def on_others_toggle(event):
update_button_styles(toggle_others)
apply_filters(event) # Pass the event argument here
# Create Toggle Widgets with default button styles
toggle_lightplate = pn.widgets.Toggle(name='Include Lightplate', value=True, button_type='primary')
toggle_others = pn.widgets.Toggle(name='Include Sub-Levels & Kits', value=True, button_type='primary')
# Update the button type based on its value
def update_button_styles(toggle_widget):
"""Updates the button style to solid (active) or outline (inactive) based on the value."""
if toggle_widget.value:
toggle_widget.button_type = 'primary' # Solid fill when active
else:
toggle_widget.button_type = 'default' # Outline when inactive
# Attach the update function to the toggle widgets
toggle_lightplate.param.watch(lambda event: update_button_styles(toggle_lightplate), 'value')
toggle_others.param.watch(lambda event: update_button_styles(toggle_others), 'value')
# Initial styles application
update_button_styles(toggle_lightplate)
update_button_styles(toggle_others)
################################################################################################################
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////
# Define the YoY Monthly sales as a line graph
# Define the Monthly sales since the beginning of the project as a line graph
# Define the yearly, monthly and since-inception sales & shipment indicators
##################################################################
#///////////////////////////////////////////////////////////////
# Create sales and shipment graphs: YoY, MoM and total since inception
#///////////////////////////////////////////////////////////////
##################################################################
# Figure 'Year-Over-Year' monthly sales - Updated 02/18/25
##################################################################
def create_yoy_sales_figure(df_Historic_dashboard_filtered):
# Aggregate data by year and month for sales
df_YoY_sales = df_Historic_dashboard_filtered.groupby(['Year', 'Month']).agg({
'Sales USD': 'sum'
}).reset_index()
# Map month numbers to month names
month_map = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun',
7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
df_YoY_sales['Month Name'] = df_YoY_sales['Month'].map(month_map)
# Convert sales amount to thousands of dollars (K$)
df_YoY_sales['Sales K$'] = df_YoY_sales['Sales USD'] / 1000
# Calculate the maximum y-value and add a 25% buffer
max_sales = df_YoY_sales['Sales K$'].max()
y_buffer = max_sales*0.25 # 25% buffer
y_range = (0, max_sales + y_buffer) # Set y-axis range
# Create a figure
months = list(month_map.values())
p_YoY_sales = figure(
title="Year-Over-Year Monthly Sales [K$]",
x_axis_label='Month',
y_axis_label='Sales [K$]',
x_range=months,
y_range=y_range, # Set the y-axis range
tools="pan,wheel_zoom,box_zoom,reset" # Do NOT include hover here
)
# Set the title color
p_YoY_sales.title.text_color = "#000000"
# Define specific colors for years 2023 to 2027
year_color_map = {
2023: 'blue',
2024: 'green',
2025: 'purple',
2026: 'orange',
2027: 'red'
}
# Track all glyph renderers (lines and circles)
all_renderers = []
# Plot data for each year
years = sorted(df_YoY_sales['Year'].unique())
for year in years:
subset = df_YoY_sales[df_YoY_sales['Year'] == year]
# Get the color for the year (default to gray if year not in the map)
line_color = year_color_map.get(year, 'gray') # Default to gray for unknown years
# Plot line and markers with legend labels
line = p_YoY_sales.line(
subset['Month Name'], subset['Sales K$'],
line_width=2, color=line_color, legend_label=str(year) # Add legend_label
)
circle = p_YoY_sales.circle(
subset['Month Name'], subset['Sales K$'],
size=8, color=line_color, alpha=0.8, legend_label=str(year) # Add legend_label
)
all_renderers.extend([line, circle]) # Add to renderer list
# Label placement logic (same as before)
# ... Insert label placement code here if needed (a hedged sketch follows below)
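# Hedged sketch (optional, left commented out): annotate each point with its value using a LabelSet.
# 'subset', 'line_color' and 'p_YoY_sales' come from the surrounding loop/figure; the offset and
# font size below are assumptions to adjust as needed.
# label_source = ColumnDataSource(dict(
#     x=subset['Month Name'].tolist(),
#     y=subset['Sales K$'].tolist(),
#     text=[f"{v:,.1f}" for v in subset['Sales K$']]))
# labels = LabelSet(x='x', y='y', text='text', source=label_source,
#                   y_offset=8, text_font_size='8pt', text_color=line_color)
# p_YoY_sales.add_layout(labels)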
#############################################################
# Fix: Create HoverTool AFTER all glyphs are plotted
#############################################################
hover = HoverTool(
renderers=all_renderers, # Attach to all lines and circles
tooltips=[
('Month', '@x'),
('Sales', '@y{($0.0,0.0)}K') # Corrected syntax
],
mode='mouse' # Show tooltip closest to mouse
)
p_YoY_sales.add_tools(hover) # Add HoverTool explicitly
# Configure y-axis formatting
p_YoY_sales.yaxis.formatter = CustomJSTickFormatter(code="""
return '$' + (tick).toFixed(0) + 'k';
""")
# Customize grid lines
p_YoY_sales.ygrid.grid_line_dash = [6, 4]
p_YoY_sales.xgrid.visible = False
p_YoY_sales.toolbar.logo = None
# Customize legend
p_YoY_sales.legend.location = "top_left"
p_YoY_sales.legend.visible = True # Ensure the legend is visible
p_YoY_sales.legend.click_policy = "hide" # Allow hiding lines by clicking on legend
p_YoY_sales.legend.label_text_font_size = "10pt" # Adjust font size if needed
p_YoY_sales.legend.spacing = 5 # Add spacing between legend items
return p_YoY_sales
##################################################################
# Figure 'Year-Over-Year' monthly shipment - Updated 02/18/25
##################################################################
def create_yoy_shipments_figure(df_Historic_dashboard_filtered):
# Aggregate data by year and month for shipments
df_YoY_shipments = df_Historic_dashboard_filtered.groupby(['Year', 'Month']).agg({
'Qty Shipped': 'sum'
}).reset_index()
# Map month numbers to month names
month_map = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun',
7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
df_YoY_shipments['Month Name'] = df_YoY_shipments['Month'].map(month_map)
# Calculate the maximum y-value and add a 25% buffer
max_shipments = df_YoY_shipments['Qty Shipped'].max()
y_buffer = max_shipments*0.25 # 25% buffer
y_range = (0, max_shipments + y_buffer) # Set y-axis range
# Create a figure
months = list(month_map.values())
p_YoY_shipments = figure(
title="Year-Over-Year Monthly Shipments [quantity shipped]",
x_axis_label='Month',
y_axis_label='Shipments',
x_range=months,
y_range=y_range, # Set the y-axis range
tools="pan,wheel_zoom,box_zoom,reset" # Do NOT include hover here
)
# Set the title color
p_YoY_shipments.title.text_color = "#000000"
# Define specific colors for years 2023 to 2027
year_color_map = {
2023: 'blue',
2024: 'green',
2025: 'purple',
2026: 'orange',
2027: 'red'
}
# Track all glyph renderers (lines and circles)
all_renderers = []
# Plot data for each year
years = sorted(df_YoY_shipments['Year'].unique())
for year in years:
subset = df_YoY_shipments[df_YoY_shipments['Year'] == year]
# Get the color for the year (default to gray if year not in the map)
line_color = year_color_map.get(year, 'gray') # Default to gray for unknown years
# Plot line and markers with legend labels
line = p_YoY_shipments.line(
subset['Month Name'], subset['Qty Shipped'],
line_width=2, color=line_color, legend_label=str(year) # Add legend_label
)
circle = p_YoY_shipments.circle(
subset['Month Name'], subset['Qty Shipped'],
size=8, color=line_color, alpha=0.8, legend_label=str(year) # Add legend_label
)
all_renderers.extend([line, circle]) # Add to renderer list
# Label placement logic (same as before)
# ... Insert label placement code here if needed (see note below)
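# (A hedged LabelSet sketch for per-point labels is shown in create_yoy_sales_figure above;
#  the same approach applies here with 'Qty Shipped' in place of 'Sales K$'.)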
#############################################################
# Fix: Create HoverTool AFTER all glyphs are plotted
#############################################################
hover = HoverTool(
renderers=all_renderers, # Attach to all lines and circles
tooltips=[
('Month', '@x'),
('Shipments', '@y{0,0}')
],
mode='mouse' # Show tooltip closest to mouse
)
p_YoY_shipments.add_tools(hover) # Add HoverTool explicitly
# Customize grid lines
p_YoY_shipments.ygrid.grid_line_dash = [6, 4]
p_YoY_shipments.xgrid.visible = False
p_YoY_shipments.toolbar.logo = None
# Customize legend
p_YoY_shipments.legend.location = "top_left"
p_YoY_shipments.legend.visible = True # Ensure the legend is visible
p_YoY_shipments.legend.click_policy = "hide" # Allow hiding lines by clicking on legend
p_YoY_shipments.legend.label_text_font_size = "10pt" # Adjust font size if needed
p_YoY_shipments.legend.spacing = 5 # Add spacing between legend items
return p_YoY_shipments
##################################################################
#///////////////////////////////////////////////////////////////
# Create customers graphs
#///////////////////////////////////////////////////////////////
##################################################################
#######################################################################################
# Define the total shipments and total sales per customer
#######################################################################################
# Card 'Customers'
#################################
#////////////////////////////////
################################
# Integrate logo to the graph
###############################
#////////////////////////////////
# Created 09/06
def image_to_base64(image_path):
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode('utf-8')
def scale_image(image_base64, max_width, max_height):
# Decode base64 image
image_data = base64.b64decode(image_base64)
image = Image.open(io.BytesIO(image_data))
# Calculate new dimensions maintaining aspect ratio
width, height = image.size
scaling_factor = min(max_width / width, max_height / height)
new_width = int(width * scaling_factor)
new_height = int(height * scaling_factor)
# Resize image with LANCZOS resampling
image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
# Encode image back to base64
buffered = io.BytesIO()
image.save(buffered, format="PNG")
new_image_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
return f'data:image/png;base64,{new_image_base64}'
def create_logo_mapping(image_directory, max_width=30, max_height=30):
logo_mapping = {}
for image_name in os.listdir(image_directory):
if image_name.endswith(('png', 'jpg', 'jpeg')):
customer_name = os.path.splitext(image_name)[0]
image_path = os.path.join(image_directory, image_name)
base64_image = image_to_base64(image_path)
scaled_image = scale_image(base64_image, max_width, max_height)
logo_mapping[customer_name] = scaled_image
return logo_mapping
####################################
# Adjust the path to your directory
######################################
current_folder = os.getcwd() # Get the current working directory
image_directory = os.path.join(current_folder, 'Compressed_Images')
# Print all files in the directory to ensure the path is correct
#print(os.listdir(image_directory))
# Create logo mapping
logo_mapping = create_logo_mapping(image_directory)
#define the default logo offset on top of the bars
#default_logo_offset = 50
# Add this after creating logo_mapping to inspect
#print(logo_mapping)
#////////////////////////////////
#################################
# Customer graph
#################################
#////////////////////////////////
# WIP 09/18 for Hover compatibility
#def create_customers_figure(df_priority_dashboard_filtered, logo_mapping, logo_offset):
def create_customers_figure(df_priority_dashboard_filtered, logo_mapping):
if 'End Costumer' not in df_priority_dashboard_filtered.columns:
raise ValueError("'End Costumer' column is missing in the DataFrame")
# Rename all columns to use underscores consistently for Hover compatibility
df_customer = df_priority_dashboard_filtered.rename(columns={
'Pty Indice': 'Pty_Indice',
'Shipped': 'Total_Shipped',
'Production Status': 'Production_Status',
'End Costumer': 'End_Customer',
'Sales USD': 'Sales_USD',
'Qty Shipped': 'Qty_Shipped'
})
# Group and summarize customer data
df_customer_summary = df_customer.groupby('End_Customer').agg({
'Qty_Shipped': 'sum',
'Sales_USD': 'sum'
}).reset_index()
df_customer_summary['Sales_USD'] = df_customer_summary['Sales_USD'] / 1000
df_customer_summary['Logo'] = df_customer_summary['End_Customer'].map(logo_mapping)
# Define logo offset based on Qty_Shipped and Sales_USD - New 10/21
logo_offset_shipment = 100 if df_customer_summary['Qty_Shipped'].max() < 1000 else 300 # Check maximum Qty_Shipped
logo_offset_sale = 50 if df_customer_summary['Sales_USD'].max() < 1000 else 600 # Check maximum Sales_USD
# Add offset columns for logos
df_customer_summary['Logo_Offset_Shipments'] = df_customer_summary['Qty_Shipped'] + logo_offset_shipment
df_customer_summary['Logo_Offset_Sales'] = df_customer_summary['Sales_USD'] + logo_offset_sale
source = ColumnDataSource(df_customer_summary)
# Determine max values for setting axis limits
max_qty_shipped = df_customer_summary['Qty_Shipped'].max()
max_sales = df_customer_summary['Sales_USD'].max()
# Calculate new axis limits (max + 50%)
shipment_y_range = (0, max_qty_shipped * 1.5)
sales_y_range = (0, max_sales * 1.5)
# Create shipment figure
p_shipment = figure(
x_range=df_customer_summary['End_Customer'].tolist(),
title="Total shipment per customer [quantity shipped]",
x_axis_label='Customer',
y_axis_label='Shipments',
tools="pan,wheel_zoom,save,reset",
y_range=shipment_y_range
)
# Set the title color
p_shipment.title.text_color = "#000000"
p_shipment.vbar(
x='End_Customer',
top='Qty_Shipped',
width=0.4,
source=source,
color='green',
alpha=0.6
)
p_shipment.xaxis.major_label_text_font_size = '10pt'
# Add hover tool with underscores in tooltips
p_shipment.add_tools(HoverTool(
tooltips=[('Customer', '@End_Customer'), ('Qty Shipped', '@Qty_Shipped{0,0}')]
))
p_shipment.xaxis.major_label_orientation = 1.2
p_shipment.yaxis.formatter = NumeralTickFormatter(format="0,0")
# Add logos with original size and offset
p_shipment.add_glyph(source, ImageURL(
url='Logo',
x='End_Customer',
y='Logo_Offset_Shipments', # Use the offset column for shipments
anchor="center"
))
# Customize grid lines
p_shipment.ygrid.grid_line_dash = [6, 4]
p_shipment.xgrid.visible = False
# Hide the Bokeh logo
p_shipment.toolbar.logo = None
# Create sales figure
p_sales = figure(
x_range=df_customer_summary['End_Customer'].tolist(),
title="Total Sales per customer [K$]",
x_axis_label='Customer',
y_axis_label='Sales [K$]',
tools="pan,wheel_zoom,save,reset",
y_range=sales_y_range
)
# Set the title color
p_sales.title.text_color = "#000000" # #000000 black, #305496 blue
# Add hover tool for sales figure
p_sales.add_tools(HoverTool(
tooltips=[('Customer', '@End_Customer'), ('Sales', '@Sales_USD{($0,0.0)}K')]
))
p_sales.xaxis.major_label_orientation = 1.2
p_sales.yaxis.formatter = CustomJSTickFormatter(code="""
return '$' + (tick).toFixed(0) + 'k';
""")
p_sales.vbar(
x='End_Customer',
top='Sales_USD',
width=0.4,
source=source,
color='blue',
alpha=0.6
)
# Add logos with original size and offset
p_sales.add_glyph(source, ImageURL(
url='Logo',
x='End_Customer',
y='Logo_Offset_Sales', # Use the offset column for sales
anchor='center'
))
# Customize grid lines
p_sales.ygrid.grid_line_dash = [6, 4]
p_sales.xgrid.visible = False
# Hide the Bokeh logo
p_sales.toolbar.logo = None
return p_shipment, p_sales
##################################################################
#///////////////////////////////////////////////////////////////
# Create INDICATORS
#///////////////////////////////////////////////////////////////
##################################################################
# Card 'Yearly metrics - comparison this year vs last year
##################################################################
# --> Comparison of cumulative metrics from the start of the current year up to today against the same period in the previous year
def calculate_yearly_metrics(df_Historic_dashboard_filtered):
today = pd.to_datetime('today')
current_year = today.year
last_year = current_year - 1
current_month = today.month
current_day = today.day
# Calculate start date for the current year and last year
start_of_current_year = pd.Timestamp(year=current_year, month=1, day=1)
start_of_last_year = pd.Timestamp(year=last_year, month=1, day=1)
end_of_last_year = pd.Timestamp(year=last_year, month=current_month, day=current_day)
# Filter and aggregate data for the current year up to today
df_Current_Year = df_Historic_dashboard_filtered[
(df_Historic_dashboard_filtered['Year'] == current_year) &
(df_Historic_dashboard_filtered['Invoice date'] <= today)
].agg({
'Qty Shipped': 'sum',
'Sales USD': 'sum'
}).to_dict()
# Filter and aggregate data for the last year up to the same date
df_Last_Year = df_Historic_dashboard_filtered[
(df_Historic_dashboard_filtered['Year'] == last_year) &
(df_Historic_dashboard_filtered['Invoice date'] <= end_of_last_year)
].agg({
'Qty Shipped': 'sum',
'Sales USD': 'sum'
}).to_dict()
# All values converted to integer
total_shipped_current = int(df_Current_Year['Qty Shipped'])
total_sales_current = int(df_Current_Year['Sales USD'])
total_shipped_last = int(df_Last_Year['Qty Shipped'])
total_sales_last = int(df_Last_Year['Sales USD'])
# Calculate percentage change
if total_shipped_last != 0:
pct_change_shipped = ((total_shipped_current - total_shipped_last) / total_shipped_last) * 100
else:
pct_change_shipped = float('inf') if total_shipped_current > 0 else float('-inf')
if total_sales_last != 0:
pct_change_sales = ((total_sales_current - total_sales_last) / total_sales_last) * 100
else:
pct_change_sales = float('inf') if total_sales_current > 0 else float('-inf')
return {
'total_shipped_current': total_shipped_current,
'total_sales_current': total_sales_current,
'total_shipped_last': total_shipped_last,
'total_sales_last': total_sales_last,
'pct_change_shipped': pct_change_shipped,
'pct_change_sales': pct_change_sales
}
def create_yearly_metrics_indicator(df_Historic_dashboard_filtered):
# Calculate the metrics
metrics = calculate_yearly_metrics(df_Historic_dashboard_filtered)
# Determine trend colors and arrows
trend_color_shipped = "green" if metrics['pct_change_shipped'] >= 0 else "red"
trend_arrow_shipped = "▲" if metrics['pct_change_shipped'] >= 0 else "▼"
trend_color_sales = "green" if metrics['pct_change_sales'] >= 0 else "red"
trend_arrow_sales = "▲" if metrics['pct_change_sales'] >= 0 else "▼"
# Create HTML content with trend information
html_content = f"""
<div style="font-size: 20px; font-family: Arial, sans-serif; padding: 10px; border: 1px solid #ddd; border-radius: 5px; width: 100%; box-sizing: border-box;">
<h3 style="margin: 0; color: teal;"> Yearly comparison - {pd.to_datetime('today').year}</h3>
<p style="margin: 10px 0; font-size: 16px;">
Total Quantity Shipped: <strong style="color: {trend_color_shipped};">{metrics['total_shipped_current']:,.0f}</strong>
<span style="font-size: 14px; color: {trend_color_shipped};">({trend_arrow_shipped} {metrics['pct_change_shipped']:+,.1f}%)</span>
</p>
<p style="margin: 10px 0; font-size: 16px;">
Total realized Sales [USD]: <strong style="color: {trend_color_sales};">${metrics['total_sales_current']:,.0f}</strong>
<span style="font-size: 14px; color: {trend_color_sales};">({trend_arrow_sales} {metrics['pct_change_sales']:+,.1f}%)</span>
</p>
</div>
"""
# Create layout with indicators and HTML content
layout = pn.Column(
pn.pane.HTML(html_content, sizing_mode='stretch_width'),
sizing_mode='stretch_width'
)
return layout
##################################################################
# Card 'Since Inception - Beginning of the project'
##################################################################
# update 09/12 --> Include 'Total backlog Sales (USD)' & 'Total past due Sales (USD)'
# These 2 new variables should come from the dataframe 'df_Backlog_dashboard' and be calculated based on the 'Requested Date' (the 'Due Date' has been changed for some PNs, so the 'Requested Date' is the real due date).
# --> Compare the previous month with the month before that
def calculate_since_inception_metrics(df_Historic_dashboard_filtered):
#print("calculate_since_inception_metrics running")
inception_year = df_Historic_dashboard_filtered['Year'].min() # Assuming inception is the earliest year in the dataset
current_year = pd.to_datetime('today').year
df_Since_Inception = df_Historic_dashboard_filtered[df_Historic_dashboard_filtered['Year'] >= inception_year].agg({
'Qty Shipped': 'sum',
'Sales USD': 'sum'
}).to_dict()
total_shipped_since_inception = int(df_Since_Inception['Qty Shipped'])
total_sales_since_inception = int(df_Since_Inception['Sales USD'])
return {
'total_shipped_since_inception': total_shipped_since_inception,
'total_sales_since_inception': total_sales_since_inception
}
def create_since_inception_indicator(df_Historic_dashboard_filtered):
#print("create_since_inception_indicator running")
metrics = calculate_since_inception_metrics(df_Historic_dashboard_filtered)
# Create HTML content without trend information
html_content = f"""
<div style="font-size: 20px; font-family: Arial, sans-serif; padding: 10px; border: 1px solid #ddd; border-radius: 5px; width: 100%; box-sizing: border-box;">
<h3 style="margin: 0; color: teal;">Since inception of the Project</h3>
<p style="margin: 10px 0; font-size: 16px;">
Total Quantity Shipped: <strong>{metrics['total_shipped_since_inception']:,.0f}</strong>
</p>
<p style="margin: 10px 0; font-size: 16px;">
Total realized Sales [USD]: <strong>${metrics['total_sales_since_inception']:,.0f}</strong>
</p>
</div>
"""
# Create layout with HTML content
layout = pn.Column(
pn.pane.HTML(html_content, sizing_mode='stretch_width'),
sizing_mode='stretch_width'
)
return layout
# 09/12
# --> Calculate the 'Total backlog Sales (USD)' & 'Total past due Sales (USD)'; a hedged sketch follows below
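# Hedged sketch (not yet wired into the dashboard): compute the two backlog KPIs described
# above from df_Backlog_dashboard. The column names 'Sales USD' and 'Requested Date' are
# assumptions; adjust them to the actual backlog column names before use.
def calculate_backlog_metrics(df_backlog, sales_col='Sales USD', date_col='Requested Date'):
    """Return total backlog sales and past-due sales (past due = requested date before today)."""
    df = df_backlog.copy()
    df[date_col] = pd.to_datetime(df[date_col], errors='coerce')
    sales = pd.to_numeric(df[sales_col], errors='coerce').fillna(0)
    past_due_mask = df[date_col] < pd.to_datetime('today').normalize()
    return {
        'Total backlog Sales (USD)': float(sales.sum()),
        'Total past due Sales (USD)': float(sales[past_due_mask].sum())
    }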
##################################################################
# Card 'Monthly metrics - Previous month vs previous previous month
##################################################################
def calculate_monthly_metrics(df_Historic_dashboard_filtered):
current_date = pd.to_datetime('today')
current_month = current_date.month
current_year = current_date.year
# Calculate the month and year for the previous month
if current_month > 1:
previous_month = current_month - 1
previous_month_year = current_year
else:
previous_month = 12
previous_month_year = current_year - 1
# Calculate the month and year for the month before the previous month
if previous_month > 1:
two_months_ago = previous_month - 1
two_months_ago_year = previous_month_year
else:
two_months_ago = 12
two_months_ago_year = previous_month_year - 1
# Filter and aggregate data for the previous month
df_Previous_Month = df_Historic_dashboard_filtered[
(df_Historic_dashboard_filtered['Month'] == previous_month) &
(df_Historic_dashboard_filtered['Year'] == previous_month_year)
].agg({
'Qty Shipped': 'sum',
'Sales USD': 'sum'
}).to_dict()
# Filter and aggregate data for the month before the previous month
df_Two_Months_Ago = df_Historic_dashboard_filtered[
(df_Historic_dashboard_filtered['Month'] == two_months_ago) &
(df_Historic_dashboard_filtered['Year'] == two_months_ago_year)
].agg({
'Qty Shipped': 'sum',
'Sales USD': 'sum'
}).to_dict()
# Extract aggregated values
total_shipped_previous = int(df_Previous_Month['Qty Shipped'])
total_sales_previous = int(df_Previous_Month['Sales USD'])
total_shipped_two_months_ago = int(df_Two_Months_Ago['Qty Shipped'])
total_sales_two_months_ago = int(df_Two_Months_Ago['Sales USD'])
# Calculate percentage changes
if total_shipped_two_months_ago != 0:
pct_change_shipped = ((total_shipped_previous - total_shipped_two_months_ago) / total_shipped_two_months_ago) * 100
else:
pct_change_shipped = float('inf') if total_shipped_previous > 0 else float('-inf')
if total_sales_two_months_ago != 0:
pct_change_sales = ((total_sales_previous - total_sales_two_months_ago) / total_sales_two_months_ago) * 100
else:
pct_change_sales = float('inf') if total_sales_previous > 0 else float('-inf')
return {
'total_shipped_previous': total_shipped_previous,
'total_sales_previous': total_sales_previous,
'total_shipped_two_months_ago': total_shipped_two_months_ago,
'total_sales_two_months_ago': total_sales_two_months_ago,
'pct_change_shipped': pct_change_shipped,
'pct_change_sales': pct_change_sales
}
def create_monthly_metrics_indicator(df_Historic_dashboard_filtered):
# Calculate the current date and determine the previous month and year
today = pd.to_datetime('today')
if today.month > 1:
previous_month = today.month - 1
previous_month_year = today.year
else:
previous_month = 12
previous_month_year = today.year - 1
# Convert month number to month name
month_names = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
previous_month_name = month_names[previous_month - 1]
# Calculate metrics for the previous month compared to two months ago
metrics = calculate_monthly_metrics(df_Historic_dashboard_filtered)
# Determine trend colors and arrows
trend_color_shipped = "green" if metrics['pct_change_shipped'] >= 0 else "red"
trend_arrow_shipped = "▲" if metrics['pct_change_shipped'] >= 0 else "▼"
trend_color_sales = "green" if metrics['pct_change_sales'] >= 0 else "red"
trend_arrow_sales = "▲" if metrics['pct_change_sales'] >= 0 else "▼"
# Create HTML content with trend information
html_content = f"""
<div style="font-size: 20px; font-family: Arial, sans-serif; padding: 10px; border: 1px solid #ddd; border-radius: 5px; width: 100%; box-sizing: border-box;">
<h3 style="margin: 0; color: teal;">Monthly comparison - {previous_month_name} {previous_month_year}</h3>
<p style="margin: 10px 0; font-size: 16px;">
Total Quantity Shipped (vs. previous Month): <strong style="color: {trend_color_shipped};">{metrics['total_shipped_previous']:,.0f}</strong>
<span style="font-size: 14px; color: {trend_color_shipped};">({trend_arrow_shipped} {metrics['pct_change_shipped']:+,.1f}%)</span>
</p>
<p style="margin: 10px 0; font-size: 16px;">
Total realized Sales [USD] (vs. previous Month): <strong style="color: {trend_color_sales};">${metrics['total_sales_previous']:,.0f}</strong>
<span style="font-size: 14px; color: {trend_color_sales};">({trend_arrow_sales} {metrics['pct_change_sales']:+,.1f}%)</span>
</p>
</div>
"""
# Create layout with indicators and HTML content
layout = pn.Column(
pn.pane.HTML(html_content, sizing_mode='stretch_width'),
sizing_mode='stretch_width'
)
return layout
#/////////////////////////////////////////////////////////////
##############################################################
# Creation of Card and Layout
##############################################################
#/////////////////////////////////////////////////////////////
##############################################
# Defining color and dimensions for the cards
###############################################
# Define style properties for each card
# background_color = Header background color
# font_color = Header text font color
# width & height are the dimensions of the card; margin is the margin around the card (horizontal, vertical)
card_styles = {
"YoY Sales": {
"background_color": "#98c6e5",
"font_color": "white",
"width": 1800,
"height": 500,
"margin": (5, 5),
"font_weight": "bold"
},
"YoY Shipments": {
"background_color": "#98c6e5",
"font_color": "white",
"width": 1800,
"height": 500,
"margin": (5, 5),
"font_weight": "bold"
},
"Total Shipment per customer": {
"background_color": "#aee0d9",
"font_color": "white",
"width": 900,
"height": 500,
"margin": (5, 5),
"font_weight": "bold"
},
"Total sales per customer": {
"background_color": "#aee0d9",
"font_color": "white",
"width": 900,
"height": 500,
"margin": (5, 5),
"font_weight": "bold"
},
"Yearly KPI": {
"background_color": "#98c6e5",
"font_color": "white",
"width": 650,
"height": 200,
"margin": (5, 5),
"font_weight": "bold"
},
"Cumulative KPI": {
"background_color": "#aee0d9",
"font_color": "white",
"width": 650,
"height": 200,
"margin": (5, 5),
"font_weight": "bold"
},
"Monthly KPI": {
"background_color": "#a8c1a5",
"font_color": "white",
"width": 650,
"height": 200,
"margin": (5, 5),
"font_weight": "bold"
}
}
###############################################
# Defining function
###############################################
def create_card_indicators(card_title, panel_object, styles):
# Ensure the panel object has responsive sizing
panel_object.sizing_mode = 'stretch_both'
# Create inline HTML for the card header and card styles (border, round corners)
card_style = f"""
<style>
.custom-card {{
border: 2px solid {styles["background_color"]}; /* Border matching header background */
border-radius: 10px; /* Rounded corners for the card */
overflow: hidden; /* Ensure content fits within the card */
box-shadow: 2px 2px 10px rgba(0, 0, 0, 0.1); /* Add a subtle shadow for aesthetics */
}}
.custom-card-header {{
background-color: {styles["background_color"]}; /* Header background */
color: {styles["font_color"]}; /* Header font color */
padding: 10px;
font-size: 16px;
text-align: center;
font-weight: bold; /* Make text bold */
width: 100%;
border-top-left-radius: 10px; /* Rounded top corners */
border-top-right-radius: 10px; /* Rounded top corners */
}}
</style>
"""
header_html = f"<div class='custom-card-header'>{card_title}</div>"
# Create the card layout with the header and the panel object
card = pn.Column(
pn.pane.HTML(card_style + header_html), # Header with custom style
panel_object, # The panel object (e.g., plot or indicator)
width=styles["width"],
height=styles["height"],
sizing_mode='fixed',
margin=styles["margin"],
css_classes=['custom-card'] # Applying the custom card style
)
return card
#//////////////////////////////////////////////////////////////
################################################################
# Data update & Update cards
################################################################
#//////////////////////////////////////////////////////////////
# Function to update the cards when the data changes
def update_cards(event):
# Update data and re-render the layout
new_layout = update_data(event) # Get the updated layout from update_data
cover_dashboard[-1] = new_layout # Replace the last item in the column with the updated layout
# Attach the update_cards function to the program selection widget
program_widget_List.param.watch(update_cards, 'value') # Added 10/21
# Attach update_cards to toggle changes
toggle_lightplate.param.watch(update_cards, 'value') # Watch for changes in toggle_lightplate
toggle_others.param.watch(update_cards, 'value') # Watch for changes in toggle_others
# Data update function
def update_data(event):
selected_program = program_widget_List.value
# Filter df_Historic_dashboard based on the selected program
df_Historic_dashboard_filtered = df_Historic_dashboard[df_Historic_dashboard['Program'] == selected_program]
# Filter df_Priority_dashboard based on the selected program
df_Priority_dashboard_filtered = df_Priority_dashboard[df_Priority_dashboard['Program'] == selected_program]
# Merge df_Priority_dashboard_filtered with the filtered df_Historic_dashboard
df_Priority_dashboard_filtered = pd.merge(
df_Priority_dashboard_filtered,
df_Historic_dashboard_filtered[['Pty Indice', 'Sales USD', 'Qty Shipped']],
on='Pty Indice',
how='left'
)
# Fill NaN values with 0 and ensure integer types
df_Priority_dashboard_filtered.fillna(0, inplace=True)
df_Priority_dashboard_filtered['Qty Shipped'] = df_Priority_dashboard_filtered['Qty Shipped'].astype(int)
df_Priority_dashboard_filtered['Shipped'] = df_Priority_dashboard_filtered['Shipped'].astype(int)
# Apply filters from the toggle buttons for each DataFrame
df_Historic_dashboard_filtered = filter_dashboard(
df_Historic_dashboard_filtered,
toggle_lightplate.value,
toggle_others.value,
'Product Category' # Correct column name for Historic DataFrame
)
df_Priority_dashboard_filtered = filter_dashboard(
df_Priority_dashboard_filtered,
toggle_lightplate.value,
toggle_others.value,
'Product Category' # Correct column name for Priority DataFrame
)
# Update the plots and indicators with the filtered data
return update_plots_and_indicators(df_Historic_dashboard_filtered, df_Priority_dashboard_filtered)
# Attach the update function to the program selection widget
program_widget_List.param.watch(update_data, 'value')
# Function to update the plots and indicators
def update_plots_and_indicators(df_Historic_dashboard_filtered, df_Priority_dashboard_filtered):
# Create figures and indicators
yoy_sales_figure = create_yoy_sales_figure(df_Historic_dashboard_filtered)
yoy_shipments_figure = create_yoy_shipments_figure(df_Historic_dashboard_filtered)
#customers_shipment, customers_sales = create_customers_figure(df_Priority_dashboard_filtered, logo_mapping, logo_offset) # 10/21
customers_shipment, customers_sales = create_customers_figure(df_Priority_dashboard_filtered, logo_mapping)
yearly_metrics_indicator = create_yearly_metrics_indicator(df_Historic_dashboard_filtered)
since_inception_indicator = create_since_inception_indicator(df_Historic_dashboard_filtered)
monthly_metrics_indicator = create_monthly_metrics_indicator(df_Historic_dashboard_filtered)
# Create and display the cards with specified styles
card_yoy_sales = create_card_indicators("YoY Sales", yoy_sales_figure, card_styles["YoY Sales"])
card_yoy_shipments = create_card_indicators("YoY Shipments", yoy_shipments_figure, card_styles["YoY Shipments"])
card_customers_shipment = create_card_indicators(f"Total Shipment per customer - [{span_report_historic_dashboard}]", customers_shipment, card_styles["Total Shipment per customer"])
card_customers_sales = create_card_indicators(f"Total sales per customer - [{span_report_historic_dashboard}]", customers_sales, card_styles["Total sales per customer"])
card_yearly_metrics = create_card_indicators("Yearly KPI - Beginning of current year up to today VS same period last year", yearly_metrics_indicator, card_styles["Yearly KPI"])
card_since_inception = create_card_indicators(f"Cumulative KPI - [{span_report_historic_dashboard}]", since_inception_indicator, card_styles["Cumulative KPI"])
card_monthly_metrics = create_card_indicators("Monthly KPI - Previous month VS two months prior", monthly_metrics_indicator, card_styles["Monthly KPI"])
# Create layout for the updated dashboard
layout = pn.Column(
pn.Row(card_monthly_metrics, pn.Spacer(width=5), card_yearly_metrics, pn.Spacer(width=5), card_since_inception),
pn.Spacer(height=50), # 10/07
card_yoy_sales,
pn.Spacer(height=50), # 10/07
card_yoy_shipments,
pn.Spacer(height=50), # 10/07
pn.Row(card_customers_shipment, card_customers_sales)
)
return layout
# Manually trigger the first update to populate the dashboard initially
dashboard_layout = update_data(None) # Call the function directly to get the initial layout
# Trigger the initial filter application when the dashboard loads
apply_filters(None)
#//////////////////////////////////////////////////////////////
###############################################################
# Creating the overall layout with proper spacing and alignment
###############################################################
#//////////////////////////////////////////////////////////////
cover_dashboard_title = f"Transfer Project Dashboard [{recent_date_str}]" # update 02/19
# Create the title section for the Cover Dashboard
cover_dashboard_section = pn.pane.HTML(f"""
<div style='background-color: {font_top_color}; width: 100%; padding: 10px; box-sizing: border-box;'>
<h1 style='font-size: 24px; color: white; text-align: left; margin: 0;'>{cover_dashboard_title}</h1>
</div>
""", sizing_mode='stretch_width')
# updated 09/30
# Create the layout for the Cover Dashboard
cover_dashboard = pn.Column(
cover_dashboard_section,
pn.layout.Divider(margin=(-10, 0, 0, 0)),
pn.Row(
program_widget_List, # Existing widget layout
pn.Spacer(width=50), # Space before the toggles
pn.Column( # Use Column to stack Spacer and Row for toggles
pn.Spacer(height=15), # Empty line above the toggle buttons
pn.Row(
toggle_lightplate, # Toggle buttons in the same row
pn.Spacer(width=15), # Space between the buttons
toggle_others,
pn.Spacer(width=10),
pn.pane.Markdown("""
<span style='color: #000000;'>Click to </span><span style='color: #226AB0;'><b>Include</b></span><span style='color: #000000;'> / </span><span style='color: #D9D9D9;'><b>Exclude</b></span><span style='color: #000000;'> from the Dashboard</span>
""")
)
),
sizing_mode='stretch_width' # Ensure the row stretches to fill the width
),
pn.layout.Divider(margin=(0, 0, -10, 0)),
pn.Spacer(height=5), # Optional spacing
dashboard_layout, # Include the dashboard layout directly
sizing_mode='stretch_width'
)
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#*****************************************************************************************************************************
##############################################################################################################################
# Tab |Clear to Build summary|
##############################################################################################################################
#*****************************************************************************************************************************
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
# Reload the 'Summary' sheet into a dedicated DataFrame for the Clear-to-Build tab
df_Summary_Database = pd.read_excel(input_file_formatted, sheet_name='Summary', index_col=False)
# Create 'Program' within df_Summary_Database
df_Summary_Database['Program'] = df_Summary_Database['Pty Indice'].map(program_mapping)
# Clean unnecessary columns
columns_to_drop = ['IDD Top Level', 'SEDA Top Level', 'BOM_Index', 'Is_Make_Part', 'Parent IDD', 'Child IDDs', 'Top Level sharing Components']
df_Summary_Database = df_Summary_Database.drop(columns=[col for col in columns_to_drop if col in df_Summary_Database.columns])
#----------------------------------------------------------
# Apply Phase 4-5 grouping to df_Summary_Database
#----------------------------------------------------------
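# Rows labelled 'Phase 4' or 'Phase 5' are merged into a combined 'Phase 4-5' program,
# except for items whose 'Pty Indice' contains 'Phase5', which keep their original label.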
if 'Program' in df_Summary_Database.columns:
phase_mask = (
df_Summary_Database['Program'].isin(['Phase 4', 'Phase 5']) &
~df_Summary_Database['Pty Indice'].str.contains('Phase5', na=False)
)
df_Summary_Database.loc[phase_mask, 'Program'] = 'Phase 4-5'
#----------------------------------------------------------
# Data Preprocessing (Same as Supply Chain)
#----------------------------------------------------------
# Handle text formatting
acronyms = ['EDA', 'PCB', 'PWB', 'CPA', 'CPSL', 'ISP', 'TBD']
df_Summary_Database['Supplier'] = df_Summary_Database['Supplier'].astype(str).apply(lambda x: title_with_acronyms(x, acronyms))
df_Summary_Database['Description'] = df_Summary_Database['Description'].astype(str).apply(lambda x: title_with_acronyms(x, acronyms))
# Convert 'Max Qty Top-Level' to numeric, coercing errors to NaN
df_Summary_Database['Max Qty Top-Level'] = pd.to_numeric(df_Summary_Database['Max Qty Top-Level'], errors='coerce')
# Convert to numeric and coerce errors to NaN
df_Summary_Database['Max Qty (GS)'] = pd.to_numeric(df_Summary_Database['Max Qty (GS)'], errors='coerce')
# Replace NaN with an empty string
df_Summary_Database['Max Qty (GS)'] = df_Summary_Database['Max Qty (GS)'].apply(lambda x: '' if pd.isna(x) else int(x))
# If 'Level' column exists, set 'Qty On Hand' to empty string where Level == 0
if 'Level' in df_Summary_Database.columns and 'Qty On Hand' in df_Summary_Database.columns:
df_Summary_Database.loc[df_Summary_Database['Level'] == 0, 'Qty On Hand'] = ''
# Fill NaN values with an empty string and convert the column to string type
df_Summary_Database['Comment'] = df_Summary_Database['Comment'].fillna('').astype(str)
#df_Summary_Database['Top Level sharing Components'] = df_Summary_Database['Top Level sharing Components'].fillna('').astype(str)
df_Summary_Database['Supplier'] = df_Summary_Database['Supplier'].fillna('').astype(str)
df_Summary_Database['Description'] = df_Summary_Database['Description'].fillna('').astype(str)
# Replace NaN and 'Nan' (as string) with an empty string
df_Summary_Database = df_Summary_Database.replace(['Nan', np.nan], '')
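# Note: this blanket replacement is for display purposes; the 'Level' column is re-coerced
# to integers in update_summary_table (below) before any numeric comparisons in the styling.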
#----------------------------------------------------------
# Styling Function
#----------------------------------------------------------
def style_summary_table(df):
if df.empty:
return pd.DataFrame().style # Return empty style if DataFrame is empty
styler = df.style.hide(axis='index') # Hide the index column
#=================================================================
# Apply dark blue (#5B9BD5) to entire rows where Level = 0 - Working
#=================================================================
if 'Level' in df.columns:
styler = styler.apply(
lambda row: [f'background-color: #5B9BD5; color: white; font-weight: bold; text-align: center;' if row['Level'] == 0 else '' for _ in row],
axis=1
)
#=================================================================
# Apply base styles: Center align all text
#=================================================================
styler = styler.set_properties(**{
'text-align': 'center',
'vertical-align': 'middle'
})
#=================================================================
# Zero quantity styling - Working
#=================================================================
if 'Max Qty (GS)' in df.columns:
red_mask = df['Max Qty (GS)'] == 0
styler = styler.apply(
lambda row: ['background-color: #FFC7CE' if red_mask.loc[row.name] else '' for _ in row],
axis=1
)
#=================================================================
# Supplier-based formatting - Working
#=================================================================
supplier_colors = {
'Make Part (Phantom)': '#D9E1F2',
'Make Part': '#D9E1F2', # Same shade as 'Make Part (Phantom)'
'Floor Stock Item': '#E0E0E0',
'Make Part CUU': '#CCCCFF'
}
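# 'mask' and 'color' are bound as lambda default arguments in the loop below so each
# iteration captures its own values (avoids Python's late-binding closure behavior).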
for supplier_pattern, color in supplier_colors.items():
# Use exact match with case insensitivity
mask = df['Supplier'].str.strip().str.lower() == supplier_pattern.strip().lower()
styler = styler.apply(
lambda row, mask=mask, color=color:
[f'background-color: {color}' if mask.loc[row.name] else ''
for _ in row],
axis=1
)
#=================================================================
# Status formatting - Working
#=================================================================
status_colors = {
'Clear-to-Build': '#C6EFCE',
'Completed - No Backlog': '#6FAC46',
'Not completed - No Backlog': '#ED7D31',
'Shortage': '#FFC7CE'
}
if 'Top-Level Status' in df.columns:
styler = styler.map(
lambda v: f'background-color: {status_colors.get(v, "")}',
subset=['Top-Level Status']
)
#=================================================================
# Highlight 'Max Qty Top-Level' cells in light blue based on Level = 0 value
#=================================================================
if 'Level' in df.columns and 'Max Qty Top-Level' in df.columns:
# Get the 'Max Qty Top-Level' value for Level = 0
level_0_max_qty = df.loc[df['Level'] == 0, 'Max Qty Top-Level'].values
if len(level_0_max_qty) > 0:
level_0_max_qty = level_0_max_qty[0] # Take the first occurrence
# Flag child rows (Level >= 1) whose 'Max Qty Top-Level' matches the Level 0 value
def highlight_max_qty_top_level(row):
if row['Level'] >= 1 and row['Max Qty Top-Level'] == level_0_max_qty:
return ['background-color: #00B0F0' if col == 'Max Qty Top-Level' else '' for col in df.columns]
return ['' for _ in df.columns]
# Apply the styling to the DataFrame
styler = styler.apply(highlight_max_qty_top_level, axis=1)
#=================================================================
# Level-based coloring - Working
#=================================================================
if 'Level' in df.columns:
level_colors = {
0: '#63BE7B', 1: '#A2C075', 2: '#FFEB84',
3: '#FFD166', 4: '#F88E5B', 5: '#F8696B', 6: '#8B0000'
}
styler = styler.map(
lambda v: f'background-color: {level_colors.get(v, "transparent")}',
subset=['Level']
)
#=================================================================
return styler
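# Illustrative usage (kept commented out, not part of the dashboard flow): the returned Styler
# can be rendered on its own for a quick visual check. The preview file name is an arbitrary
# placeholder; 'Level' is coerced to int first, mirroring update_summary_table below.
# _preview = df_Summary_Database.head(20).copy()
# _preview['Level'] = pd.to_numeric(_preview['Level'], errors='coerce').fillna(-1).astype(int)
# with open('summary_style_preview.html', 'w') as f:
#     f.write(style_summary_table(_preview).to_html())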
#----------------------------------------------------------
# Widget Configuration with Dependencies
#----------------------------------------------------------
# Default values
default_summary_program = 'Phase 4-5'
default_summary_priority = 6
default_summary_indice = 'P6'
# Filter functions
def filter_summary_priorities(program):
return sorted(df_Summary_Database[df_Summary_Database['Program'] == program]['Priority'].unique().tolist())
def filter_summary_indices(priority):
return sorted(df_Summary_Database[df_Summary_Database['Priority'] == priority]['Pty Indice'].unique().tolist())
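# Note: filter_summary_indices filters on Priority alone; this assumes a given Priority value
# belongs to a single Program, otherwise indices from another program could appear in the list.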
# Initialize program widget
program_widget_summary = pn.widgets.Select(
name='Select Program',
options=sorted(df_Summary_Database['Program'].unique()),
value=default_summary_program
)
# Initialize priority widget with program dependency
priority_widget_summary = pn.widgets.Select(
name='Select Priority',
options=filter_summary_priorities(default_summary_program),
value=default_summary_priority
)
# Initialize indice widget with priority dependency
indice_widget_summary = pn.widgets.Select(
name='Select Pty Indice',
options=filter_summary_indices(default_summary_priority),
value=default_summary_indice
)
#----------------------------------------------------------
# Callback functions
#----------------------------------------------------------
def update_summary_priorities(event):
selected_program = program_widget_summary.value
new_priorities = filter_summary_priorities(selected_program)
priority_widget_summary.options = new_priorities
if priority_widget_summary.value not in new_priorities:
priority_widget_summary.value = new_priorities[0] if new_priorities else None
# Trigger indice update
update_summary_indices(event)
def update_summary_indices(event):
selected_priority = priority_widget_summary.value
new_indices = filter_summary_indices(selected_priority)
indice_widget_summary.options = new_indices
if indice_widget_summary.value not in new_indices:
indice_widget_summary.value = new_indices[0] if new_indices else None
# Set up watchers
program_widget_summary.param.watch(update_summary_priorities, 'value')
priority_widget_summary.param.watch(update_summary_indices, 'value')
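# Changing the Program cascades through Priority down to Pty Indice, so the dependent
# dropdowns always end up holding a valid combination.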
summary_html = pn.pane.HTML(
min_height=600,
styles={
'overflow-x': 'auto',
'margin': '15px 0',
'background': 'white',
'padding': '10px',
'border': 'none' # Remove container border
},
sizing_mode='stretch_width' # This ensures it stretches
)
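# Placeholder pane: update_summary_table (below) overwrites its .object with the styled table HTML.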
#----------------------------------------------------------
# Unified update function
#----------------------------------------------------------
def update_summary_table(event):
try:
filtered_df = df_Summary_Database[
(df_Summary_Database.Program == program_widget_summary.value) &
(df_Summary_Database.Priority == priority_widget_summary.value) &
(df_Summary_Database['Pty Indice'] == indice_widget_summary.value)
].drop(columns=['Pty Indice', 'Priority', 'Program'], errors='ignore')
# Coerce 'Level' to integer (when present) so the level-based styling comparisons work
if 'Level' in filtered_df.columns:
filtered_df['Level'] = pd.to_numeric(filtered_df['Level'], errors='coerce').fillna(-1).astype(int)
styled_table = style_summary_table(filtered_df)
summary_html.object = styled_table.to_html() if not filtered_df.empty else "<div>No data available</div>"
except Exception as e:
print(f"Error updating table: {str(e)}")
summary_html.object = "<div>Error loading data</div>"
# Set up final watchers
program_widget_summary.param.watch(update_summary_table, 'value')
priority_widget_summary.param.watch(update_summary_table, 'value')
indice_widget_summary.param.watch(update_summary_table, 'value')
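# Each of these watchers also fires during the cascade above, so a single Program change
# may refresh the table more than once while the dependent widgets settle.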
# Initial update to populate the table
update_summary_table(None)
#----------------------------------------------------------
# Final Layout
#----------------------------------------------------------
summary_tab = pn.Column(
pn.Row(
pn.pane.HTML("""
<div style='background-color:#4472C4; padding:10px'>
<h1 style='color:white; margin:0'>Clear to Build Summary</h1>
</div>
""", sizing_mode='stretch_width'),
sizing_mode='stretch_width'
),
pn.Row(
program_widget_summary,
priority_widget_summary,
indice_widget_summary,
sizing_mode='stretch_width'
),
pn.layout.Divider(styles={'margin': '10px 0'}), # Removed border
summary_html,
sizing_mode='stretch_width',
)
#|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
#*****************************************************************************************************************************
##############################################################################################################################
# Define Tabs and serve
##############################################################################################################################
#*****************************************************************************************************************************
#||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
print('Script successfully completed')
tabs = pn.Tabs(
("Dashboard", cover_dashboard),
("Products Status", cadrans_dashboard),
("Project Progress", historic_tab),
("Clear to Build Summary", summary_tab),
("Priority List", priority_tab),
("Snapshot", Snapshot_tab)
)
# Inject custom CSS to scale down the dashboard and force a white background.
# Both rule sets are passed in a single assignment so the second does not overwrite the first.
pn.config.raw_css = ["""
.pn-column {
transform: scale(0.8); /* Scale down to 80% of the original size */
transform-origin: top left; /* Ensure scaling starts from the top-left corner */
width: 125%; /* Compensate for the scaling to avoid empty space */
height: 125%; /* Compensate for the scaling to avoid empty space */
}
""", """
body, .pn-column, .bk-root {
background-color: white !important;
}
"""]
# Render the Dashboard
tabs.servable()
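# .servable() registers the Tabs object with Panel; launching via `panel serve` on this
# script file (name omitted here) renders the dashboard in the browser.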
print('Panel dashboard loaded')